alma-memory 0.5.1-py3-none-any.whl → 0.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alma/__init__.py +296 -226
- alma/compression/__init__.py +33 -0
- alma/compression/pipeline.py +980 -0
- alma/confidence/__init__.py +47 -47
- alma/confidence/engine.py +540 -540
- alma/confidence/types.py +351 -351
- alma/config/loader.py +157 -157
- alma/consolidation/__init__.py +23 -23
- alma/consolidation/engine.py +678 -678
- alma/consolidation/prompts.py +84 -84
- alma/core.py +1189 -430
- alma/domains/__init__.py +30 -30
- alma/domains/factory.py +359 -359
- alma/domains/schemas.py +448 -448
- alma/domains/types.py +272 -272
- alma/events/__init__.py +75 -75
- alma/events/emitter.py +285 -284
- alma/events/storage_mixin.py +246 -246
- alma/events/types.py +126 -126
- alma/events/webhook.py +425 -425
- alma/exceptions.py +49 -49
- alma/extraction/__init__.py +31 -31
- alma/extraction/auto_learner.py +265 -265
- alma/extraction/extractor.py +420 -420
- alma/graph/__init__.py +106 -106
- alma/graph/backends/__init__.py +32 -32
- alma/graph/backends/kuzu.py +624 -624
- alma/graph/backends/memgraph.py +432 -432
- alma/graph/backends/memory.py +236 -236
- alma/graph/backends/neo4j.py +417 -417
- alma/graph/base.py +159 -159
- alma/graph/extraction.py +198 -198
- alma/graph/store.py +860 -860
- alma/harness/__init__.py +35 -35
- alma/harness/base.py +386 -386
- alma/harness/domains.py +705 -705
- alma/initializer/__init__.py +37 -37
- alma/initializer/initializer.py +418 -418
- alma/initializer/types.py +250 -250
- alma/integration/__init__.py +62 -62
- alma/integration/claude_agents.py +444 -444
- alma/integration/helena.py +423 -423
- alma/integration/victor.py +471 -471
- alma/learning/__init__.py +101 -86
- alma/learning/decay.py +878 -0
- alma/learning/forgetting.py +1446 -1446
- alma/learning/heuristic_extractor.py +390 -390
- alma/learning/protocols.py +374 -374
- alma/learning/validation.py +346 -346
- alma/mcp/__init__.py +123 -45
- alma/mcp/__main__.py +156 -156
- alma/mcp/resources.py +122 -122
- alma/mcp/server.py +955 -591
- alma/mcp/tools.py +3254 -509
- alma/observability/__init__.py +91 -84
- alma/observability/config.py +302 -302
- alma/observability/guidelines.py +170 -0
- alma/observability/logging.py +424 -424
- alma/observability/metrics.py +583 -583
- alma/observability/tracing.py +440 -440
- alma/progress/__init__.py +21 -21
- alma/progress/tracker.py +607 -607
- alma/progress/types.py +250 -250
- alma/retrieval/__init__.py +134 -53
- alma/retrieval/budget.py +525 -0
- alma/retrieval/cache.py +1304 -1061
- alma/retrieval/embeddings.py +202 -202
- alma/retrieval/engine.py +850 -427
- alma/retrieval/modes.py +365 -0
- alma/retrieval/progressive.py +560 -0
- alma/retrieval/scoring.py +344 -344
- alma/retrieval/trust_scoring.py +637 -0
- alma/retrieval/verification.py +797 -0
- alma/session/__init__.py +19 -19
- alma/session/manager.py +442 -399
- alma/session/types.py +288 -288
- alma/storage/__init__.py +101 -90
- alma/storage/archive.py +233 -0
- alma/storage/azure_cosmos.py +1259 -1259
- alma/storage/base.py +1083 -583
- alma/storage/chroma.py +1443 -1443
- alma/storage/constants.py +103 -103
- alma/storage/file_based.py +614 -614
- alma/storage/migrations/__init__.py +21 -21
- alma/storage/migrations/base.py +321 -321
- alma/storage/migrations/runner.py +323 -323
- alma/storage/migrations/version_stores.py +337 -337
- alma/storage/migrations/versions/__init__.py +11 -11
- alma/storage/migrations/versions/v1_0_0.py +373 -373
- alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
- alma/storage/pinecone.py +1080 -1080
- alma/storage/postgresql.py +1948 -1559
- alma/storage/qdrant.py +1306 -1306
- alma/storage/sqlite_local.py +3041 -1457
- alma/testing/__init__.py +46 -46
- alma/testing/factories.py +301 -301
- alma/testing/mocks.py +389 -389
- alma/types.py +292 -264
- alma/utils/__init__.py +19 -0
- alma/utils/tokenizer.py +521 -0
- alma/workflow/__init__.py +83 -0
- alma/workflow/artifacts.py +170 -0
- alma/workflow/checkpoint.py +311 -0
- alma/workflow/context.py +228 -0
- alma/workflow/outcomes.py +189 -0
- alma/workflow/reducers.py +393 -0
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/METADATA +210 -72
- alma_memory-0.7.0.dist-info/RECORD +112 -0
- alma_memory-0.5.1.dist-info/RECORD +0 -93
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/retrieval/cache.py
CHANGED
@@ -1,1061 +1,1304 @@
[Old version of the file (1,061 lines, removed) was not rendered by the diff viewer; only stray fragments survive. The removed file opened with the same module docstring: "ALMA Retrieval Cache. Multi-backend caching layer for retrieval results with TTL-based expiration. Supports in-memory and Redis backends with performance monitoring." The new version (1,304 lines) follows.]
"""
ALMA Retrieval Cache.

Multi-backend caching layer for retrieval results with TTL-based expiration.
Supports in-memory and Redis backends with performance monitoring.

Key Features:
- Collision-resistant cache key generation using SHA-256
- Namespace support for multi-agent/multi-tenant isolation
- TTL-based expiration with configurable cleanup
- LRU eviction when max entries reached
- Performance metrics tracking
"""

import hashlib
import json
import logging
import struct
import threading
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Callable, Dict, List, Optional, Tuple

from alma.types import MemorySlice

logger = logging.getLogger(__name__)


# ==================== CACHE KEY GENERATION ====================


class CacheKeyGenerator:
    """
    Collision-resistant cache key generator with namespace support.

    Uses SHA-256 hashing with length-prefixed encoding to prevent
    delimiter-based collision attacks. Supports namespaces for
    multi-agent/multi-tenant isolation.

    Key Structure:
        {namespace}:{version}:{full_sha256_hash}

    The hash is computed over length-prefixed components to ensure
    that "a|b" + "c" cannot collide with "a" + "b|c".
    """

    # Version for cache key format - increment if algorithm changes
    KEY_VERSION = "v1"

    # Default namespace for single-agent deployments
    DEFAULT_NAMESPACE = "alma"

    def __init__(self, namespace: Optional[str] = None):
        """
        Initialize the cache key generator.

        Args:
            namespace: Optional namespace for cache isolation.
                Useful for multi-agent or multi-tenant deployments.
                Defaults to "alma".
        """
        self.namespace = namespace or self.DEFAULT_NAMESPACE

    @staticmethod
    def _length_prefix_encode(value: str) -> bytes:
        """
        Encode a string with its length prefix for collision resistance.

        This prevents collision attacks where "a|b" + "c" could match "a" + "b|c"
        when using simple delimiter-based concatenation.

        Args:
            value: String to encode

        Returns:
            Bytes with 4-byte big-endian length prefix followed by UTF-8 encoded value
        """
        encoded = value.encode("utf-8")
        length = len(encoded)
        return struct.pack(">I", length) + encoded

    @staticmethod
    def _normalize_query(query: str) -> str:
        """
        Normalize query string for consistent cache key generation.

        - Converts to lowercase
        - Strips leading/trailing whitespace
        - Normalizes internal whitespace to single spaces

        Args:
            query: Raw query string

        Returns:
            Normalized query string
        """
        return " ".join(query.lower().split())

    def generate(
        self,
        query: str,
        agent: str,
        project_id: str,
        user_id: Optional[str] = None,
        top_k: int = 5,
        extra_context: Optional[Dict[str, str]] = None,
    ) -> str:
        """
        Generate a collision-resistant cache key.

        Uses length-prefixed encoding of all components followed by SHA-256
        hashing to prevent delimiter-based collision attacks.

        Args:
            query: The search query (will be normalized)
            agent: Agent identifier
            project_id: Project identifier
            user_id: Optional user identifier
            top_k: Number of results requested
            extra_context: Optional extra context for key generation

        Returns:
            Cache key in format: {namespace}:{version}:{sha256_hash}
        """
        # Normalize query
        normalized_query = self._normalize_query(query)

        # Build the hash input using length-prefixed encoding
        # This ensures "a|b" + "c" cannot collide with "a" + "b|c"
        hash_input = b""
        hash_input += self._length_prefix_encode(normalized_query)
        hash_input += self._length_prefix_encode(agent)
        hash_input += self._length_prefix_encode(project_id)
        hash_input += self._length_prefix_encode(user_id or "")
        hash_input += struct.pack(">I", top_k)  # 4-byte big-endian integer

        # Add extra context if provided (sorted for determinism)
        if extra_context:
            for key in sorted(extra_context.keys()):
                hash_input += self._length_prefix_encode(key)
                hash_input += self._length_prefix_encode(extra_context[key])

        # Compute full SHA-256 hash (64 hex chars = 256 bits)
        hash_hex = hashlib.sha256(hash_input).hexdigest()

        # Return namespaced key with version
        return f"{self.namespace}:{self.KEY_VERSION}:{hash_hex}"

    def generate_pattern(
        self,
        agent: Optional[str] = None,
        project_id: Optional[str] = None,
    ) -> str:
        """
        Generate a pattern for bulk cache invalidation.

        This is primarily useful for Redis-style pattern matching.
        For in-memory caches, use the index-based invalidation instead.

        Args:
            agent: Optional agent to match
            project_id: Optional project to match

        Returns:
            Pattern string (e.g., "alma:v1:*" for all keys in namespace)
        """
        return f"{self.namespace}:{self.KEY_VERSION}:*"

    def parse_key(self, key: str) -> Tuple[str, str, str]:
        """
        Parse a cache key into its components.

        Args:
            key: Cache key to parse

        Returns:
            Tuple of (namespace, version, hash)

        Raises:
            ValueError: If key format is invalid
        """
        parts = key.split(":", 2)
        if len(parts) != 3:
            raise ValueError(f"Invalid cache key format: {key}")
        return (parts[0], parts[1], parts[2])

    def is_valid_key(self, key: str) -> bool:
        """
        Check if a key matches this generator's namespace and version.

        Args:
            key: Cache key to validate

        Returns:
            True if key is valid for this generator
        """
        try:
            namespace, version, hash_part = self.parse_key(key)
            return (
                namespace == self.namespace
                and version == self.KEY_VERSION
                and len(hash_part) == 64  # SHA-256 hex
            )
        except ValueError:
            return False


# Global default key generator
_default_key_generator = CacheKeyGenerator()
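
[Editorial aside, not part of the package: a minimal sketch of why the length-prefixed encoding above resists the delimiter collisions that naive string joining allows. The example values are invented; the identifiers are from the class above.]

gen = CacheKeyGenerator(namespace="demo")

# Naive "|"-joined keys collide: "a|b" + "c" and "a" + "b|c" flatten identically.
naive_a = "|".join(["a|b", "c"])
naive_b = "|".join(["a", "b|c"])
assert naive_a == naive_b  # "a|b|c" both times -- a collision

# Length-prefixed hashing preserves component boundaries, so the keys differ.
key_a = gen.generate(query="q", agent="a|b", project_id="c")
key_b = gen.generate(query="q", agent="a", project_id="b|c")
assert key_a != key_b
assert gen.is_valid_key(key_a)  # demo:v1:<64 hex chars>
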


# ==================== DATA STRUCTURES ====================


@dataclass
class CacheEntry:
    """A cached retrieval result with metadata."""

    result: MemorySlice
    created_at: float  # time.time() timestamp
    expires_at: float
    hit_count: int = 0
    query_hash: str = ""
    # Metadata for selective invalidation
    agent: str = ""
    project_id: str = ""
    user_id: str = ""


@dataclass
class CacheStats:
    """Statistics about cache performance."""

    hits: int = 0
    misses: int = 0
    evictions: int = 0
    current_size: int = 0
    max_size: int = 0
    # Performance metrics
    avg_get_time_ms: float = 0.0
    avg_set_time_ms: float = 0.0
    p95_get_time_ms: float = 0.0
    p95_set_time_ms: float = 0.0
    total_get_calls: int = 0
    total_set_calls: int = 0

    @property
    def hit_rate(self) -> float:
        """Calculate cache hit rate."""
        total = self.hits + self.misses
        return self.hits / total if total > 0 else 0.0

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {
            "hits": self.hits,
            "misses": self.misses,
            "evictions": self.evictions,
            "hit_rate": f"{self.hit_rate:.2%}",
            "current_size": self.current_size,
            "max_size": self.max_size,
            "avg_get_time_ms": round(self.avg_get_time_ms, 2),
            "avg_set_time_ms": round(self.avg_set_time_ms, 2),
            "p95_get_time_ms": round(self.p95_get_time_ms, 2),
            "p95_set_time_ms": round(self.p95_set_time_ms, 2),
            "total_get_calls": self.total_get_calls,
            "total_set_calls": self.total_set_calls,
        }


@dataclass
class PerformanceMetrics:
    """Tracks timing metrics for performance analysis."""

    get_times: List[float] = field(default_factory=list)
    set_times: List[float] = field(default_factory=list)
    max_samples: int = 1000

    def record_get(self, duration_ms: float):
        """Record a get operation time."""
        self.get_times.append(duration_ms)
        if len(self.get_times) > self.max_samples:
            self.get_times = self.get_times[-self.max_samples :]

    def record_set(self, duration_ms: float):
        """Record a set operation time."""
        self.set_times.append(duration_ms)
        if len(self.set_times) > self.max_samples:
            self.set_times = self.set_times[-self.max_samples :]

    def get_percentile(self, times: List[float], percentile: float) -> float:
        """Calculate percentile from timing data."""
        if not times:
            return 0.0
        sorted_times = sorted(times)
        idx = int(len(sorted_times) * percentile / 100)
        return sorted_times[min(idx, len(sorted_times) - 1)]

    def get_avg(self, times: List[float]) -> float:
        """Calculate average from timing data."""
        if not times:
            return 0.0
        return sum(times) / len(times)
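
[Editorial aside, not part of the package: a quick check of the nearest-rank percentile the class above computes, with invented timings.]

m = PerformanceMetrics()
for ms in [1.0] * 95 + [50.0] * 5:  # 100 samples: 95 fast, 5 slow
    m.record_get(ms)

# idx = int(100 * 95 / 100) = 95, which lands on the first slow sample after sorting
assert m.get_percentile(m.get_times, 95) == 50.0
assert m.get_avg(m.get_times) == (95 * 1.0 + 5 * 50.0) / 100  # 3.45 ms
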


# ==================== CACHE INTERFACE ====================


class CacheBackend(ABC):
    """Abstract interface for cache backends."""

    @abstractmethod
    def get(
        self,
        query: str,
        agent: str,
        project_id: str,
        user_id: Optional[str] = None,
        top_k: int = 5,
    ) -> Optional[MemorySlice]:
        """Get cached result if available."""
        pass

    @abstractmethod
    def set(
        self,
        query: str,
        agent: str,
        project_id: str,
        result: MemorySlice,
        user_id: Optional[str] = None,
        top_k: int = 5,
        ttl_override: Optional[int] = None,
    ) -> None:
        """Cache a retrieval result."""
        pass

    @abstractmethod
    def invalidate(
        self,
        agent: Optional[str] = None,
        project_id: Optional[str] = None,
    ) -> int:
        """Invalidate cache entries. Returns count of invalidated entries."""
        pass

    @abstractmethod
    def get_stats(self) -> CacheStats:
        """Get cache statistics."""
        pass

    @abstractmethod
    def clear(self) -> None:
        """Clear all cache entries."""
        pass


# ==================== IN-MEMORY CACHE ====================


class RetrievalCache(CacheBackend):
    """
    In-memory cache for retrieval results.

    Features:
    - Collision-resistant cache key generation using SHA-256
    - Namespace support for multi-agent/multi-tenant isolation
    - TTL-based expiration
    - LRU eviction when max size reached
    - Thread-safe operations
    - Selective cache invalidation by agent/project
    - Performance metrics tracking
    - Monitoring hooks
    """

    def __init__(
        self,
        ttl_seconds: int = 300,
        max_entries: int = 1000,
        cleanup_interval: int = 60,
        enable_metrics: bool = True,
        namespace: Optional[str] = None,
    ):
        """
        Initialize cache.

        Args:
            ttl_seconds: Time-to-live for cache entries (default: 5 minutes)
            max_entries: Maximum number of cached entries before eviction
            cleanup_interval: Seconds between cleanup cycles for expired entries
            enable_metrics: Whether to track performance metrics
            namespace: Optional namespace for cache isolation (default: "alma")
        """
        self.ttl = ttl_seconds
        self.max_entries = max_entries
        self.cleanup_interval = cleanup_interval
        self.enable_metrics = enable_metrics
        self.namespace = namespace

        # Initialize collision-resistant key generator with namespace
        self._key_generator = CacheKeyGenerator(namespace=namespace)

        self._cache: Dict[str, CacheEntry] = {}
        # Index for selective invalidation: agent -> set of cache keys
        self._agent_index: Dict[str, set] = {}
        # Index for selective invalidation: project_id -> set of cache keys
        self._project_index: Dict[str, set] = {}

        self._lock = threading.RLock()
        self._stats = CacheStats(max_size=max_entries)
        self._metrics = PerformanceMetrics() if enable_metrics else None
        self._last_cleanup = time.time()

        # Monitoring hooks
        self._on_hit: Optional[Callable[[str, float], None]] = None
        self._on_miss: Optional[Callable[[str], None]] = None
        self._on_eviction: Optional[Callable[[int], None]] = None

    def set_hooks(
        self,
        on_hit: Optional[Callable[[str, float], None]] = None,
        on_miss: Optional[Callable[[str], None]] = None,
        on_eviction: Optional[Callable[[int], None]] = None,
    ):
        """
        Set monitoring hooks for cache events.

        Args:
            on_hit: Called on cache hit with (query_hash, latency_ms)
            on_miss: Called on cache miss with (query_hash)
            on_eviction: Called on eviction with (count)
        """
        self._on_hit = on_hit
        self._on_miss = on_miss
        self._on_eviction = on_eviction

    def _generate_key(
        self,
        query: str,
        agent: str,
        project_id: str,
        user_id: Optional[str] = None,
        top_k: int = 5,
    ) -> str:
        """
        Generate a collision-resistant cache key for the query parameters.

        Uses the CacheKeyGenerator with length-prefixed encoding and full
        SHA-256 hashing to prevent delimiter-based collision attacks.

        Args:
            query: The search query (will be normalized)
            agent: Agent identifier
            project_id: Project identifier
            user_id: Optional user identifier
            top_k: Number of results requested

        Returns:
            Cache key in format: {namespace}:{version}:{sha256_hash}
        """
        return self._key_generator.generate(
            query=query,
            agent=agent,
            project_id=project_id,
            user_id=user_id,
            top_k=top_k,
        )

    def get(
        self,
        query: str,
        agent: str,
        project_id: str,
        user_id: Optional[str] = None,
        top_k: int = 5,
    ) -> Optional[MemorySlice]:
        """Get cached result if available and not expired."""
        start_time = time.time()
        key = self._generate_key(query, agent, project_id, user_id, top_k)
        now = time.time()

        with self._lock:
            # Periodic cleanup
            if now - self._last_cleanup > self.cleanup_interval:
                self._cleanup_expired()

            entry = self._cache.get(key)

            if entry is None:
                self._stats.misses += 1
                if self._on_miss:
                    self._on_miss(key)
                self._record_get_time(start_time)
                return None

            if now > entry.expires_at:
                # Entry expired
                self._remove_entry(key, entry)
                self._stats.misses += 1
                if self._on_miss:
                    self._on_miss(key)
                self._record_get_time(start_time)
                return None

            # Cache hit
            entry.hit_count += 1
            self._stats.hits += 1
            latency_ms = (time.time() - start_time) * 1000
            if self._on_hit:
                self._on_hit(key, latency_ms)
            self._record_get_time(start_time)
            logger.debug(f"Cache hit for query: {query[:50]}...")
            return entry.result

    def set(
        self,
        query: str,
        agent: str,
        project_id: str,
        result: MemorySlice,
        user_id: Optional[str] = None,
        top_k: int = 5,
        ttl_override: Optional[int] = None,
    ) -> None:
        """Cache a retrieval result."""
        start_time = time.time()
        key = self._generate_key(query, agent, project_id, user_id, top_k)
        now = time.time()
        ttl = ttl_override or self.ttl

        with self._lock:
            # Check if we need to evict entries
            if len(self._cache) >= self.max_entries and key not in self._cache:
                self._evict_lru()

            entry = CacheEntry(
                result=result,
                created_at=now,
                expires_at=now + ttl,
                hit_count=0,
                query_hash=key,
                agent=agent,
                project_id=project_id,
                user_id=user_id or "",
            )

            self._cache[key] = entry

            # Update indexes
            if agent not in self._agent_index:
                self._agent_index[agent] = set()
            self._agent_index[agent].add(key)

            if project_id not in self._project_index:
                self._project_index[project_id] = set()
            self._project_index[project_id].add(key)

            self._stats.current_size = len(self._cache)
            self._record_set_time(start_time)
            logger.debug(f"Cached result for query: {query[:50]}...")

    def invalidate(
        self,
        agent: Optional[str] = None,
        project_id: Optional[str] = None,
    ) -> int:
        """
        Invalidate cache entries matching criteria.

        If no criteria provided, clears entire cache.

        Args:
            agent: Invalidate entries for this agent
            project_id: Invalidate entries for this project

        Returns:
            Number of entries invalidated
        """
        with self._lock:
            if agent is None and project_id is None:
                # Clear all
                count = len(self._cache)
                self._cache.clear()
                self._agent_index.clear()
                self._project_index.clear()
                self._stats.evictions += count
                self._stats.current_size = 0
                if self._on_eviction and count > 0:
                    self._on_eviction(count)
                logger.info(f"Invalidated entire cache ({count} entries)")
                return count

            keys_to_remove: set = set()

            # Collect keys matching agent
            if agent and agent in self._agent_index:
                keys_to_remove.update(self._agent_index[agent])

            # Collect keys matching project (intersection if both specified)
            if project_id and project_id in self._project_index:
                project_keys = self._project_index[project_id]
                if agent:
                    # Intersection: both agent AND project must match
                    keys_to_remove = keys_to_remove.intersection(project_keys)
                else:
                    keys_to_remove.update(project_keys)

            # Remove matched entries
            count = 0
            for key in keys_to_remove:
                if key in self._cache:
                    entry = self._cache[key]
                    self._remove_entry(key, entry)
                    count += 1

            self._stats.evictions += count
            if self._on_eviction and count > 0:
                self._on_eviction(count)
            logger.info(
                f"Invalidated {count} cache entries for agent={agent}, project={project_id}"
            )
            return count

    def _remove_entry(self, key: str, entry: CacheEntry) -> None:
        """Remove an entry from cache and indexes."""
        del self._cache[key]

        # Update indexes
        if entry.agent in self._agent_index:
            self._agent_index[entry.agent].discard(key)
            if not self._agent_index[entry.agent]:
                del self._agent_index[entry.agent]

        if entry.project_id in self._project_index:
            self._project_index[entry.project_id].discard(key)
            if not self._project_index[entry.project_id]:
                del self._project_index[entry.project_id]

        self._stats.current_size = len(self._cache)

    def _cleanup_expired(self) -> None:
        """Remove all expired entries."""
        now = time.time()
        expired = [
            (key, entry) for key, entry in self._cache.items() if now > entry.expires_at
        ]

        for key, entry in expired:
            self._remove_entry(key, entry)

        if expired:
            self._stats.evictions += len(expired)
            if self._on_eviction:
                self._on_eviction(len(expired))
            logger.debug(f"Cleaned up {len(expired)} expired cache entries")

        self._last_cleanup = now

    def _evict_lru(self) -> None:
        """Evict least recently used entry (based on hit count and age)."""
        if not self._cache:
            return

        # Find entry with lowest score (hit_count / age)
        now = time.time()
        worst_key = None
        worst_entry = None
        worst_score = float("inf")

        for key, entry in self._cache.items():
            age = now - entry.created_at + 1  # +1 to avoid division by zero
            score = (entry.hit_count + 1) / age
            if score < worst_score:
                worst_score = score
                worst_key = key
                worst_entry = entry

        if worst_key and worst_entry:
            self._remove_entry(worst_key, worst_entry)
            self._stats.evictions += 1
            if self._on_eviction:
                self._on_eviction(1)
            logger.debug("Evicted LRU cache entry")
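
[Editorial aside, not part of the package: the eviction score above keeps entries that are hit often relative to their age, so the lowest-scoring entry goes first. Invented numbers:]

# score = (hit_count + 1) / age  -- lower scores are evicted first
old_popular = (9 + 1) / 100   # 0.10  -- old entry with 9 hits: kept
new_unused  = (0 + 1) / 5     # 0.20  -- fresh entry, no hits yet: kept
stale       = (0 + 1) / 200   # 0.005 -- old entry, never hit: evicted first
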

    def _record_get_time(self, start_time: float) -> None:
        """Record get operation timing."""
        if self._metrics:
            duration_ms = (time.time() - start_time) * 1000
            self._metrics.record_get(duration_ms)
            self._stats.total_get_calls += 1

    def _record_set_time(self, start_time: float) -> None:
        """Record set operation timing."""
        if self._metrics:
            duration_ms = (time.time() - start_time) * 1000
            self._metrics.record_set(duration_ms)
            self._stats.total_set_calls += 1

    def get_stats(self) -> CacheStats:
        """Get cache statistics with performance metrics."""
        with self._lock:
            self._stats.current_size = len(self._cache)

            if self._metrics:
                self._stats.avg_get_time_ms = self._metrics.get_avg(
                    self._metrics.get_times
                )
                self._stats.avg_set_time_ms = self._metrics.get_avg(
                    self._metrics.set_times
                )
                self._stats.p95_get_time_ms = self._metrics.get_percentile(
                    self._metrics.get_times, 95
                )
                self._stats.p95_set_time_ms = self._metrics.get_percentile(
                    self._metrics.set_times, 95
                )

            return self._stats

    def clear(self) -> None:
        """Clear all cache entries."""
        with self._lock:
            count = len(self._cache)
            self._cache.clear()
            self._agent_index.clear()
            self._project_index.clear()
            self._stats = CacheStats(max_size=self.max_entries)
            if self._metrics:
                self._metrics = PerformanceMetrics()
            logger.info(f"Cleared cache ({count} entries)")
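
[Editorial aside, not part of the package: a usage sketch of the in-memory backend above. `slice_` stands for a MemorySlice obtained from an actual retrieval; the agent/project names are invented.]

cache = RetrievalCache(ttl_seconds=60, max_entries=500, namespace="agent-a")
cache.set_hooks(on_miss=lambda key: logger.debug(f"miss {key[:16]}"))

# First lookup misses; store the retrieval result, then the same arguments hit.
assert cache.get("deploy checklist", agent="ops", project_id="p1") is None
cache.set("deploy checklist", agent="ops", project_id="p1", result=slice_)
# Query normalization makes casing and extra whitespace hit the same key.
assert cache.get("Deploy   Checklist", agent="ops", project_id="p1") is slice_

# Drop everything cached for one project without touching other tenants.
cache.invalidate(project_id="p1")
print(cache.get_stats().to_dict())
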


# ==================== REDIS CACHE ====================


class RedisCache(CacheBackend):
    """
    Redis-based cache for distributed deployments.

    Features:
    - Distributed caching across multiple instances
    - Built-in TTL via Redis EXPIRE
    - Selective invalidation using key prefixes and patterns
    - Performance metrics tracking
    - Automatic reconnection handling
    """

    def __init__(
        self,
        host: str = "localhost",
        port: int = 6379,
        db: int = 0,
        password: Optional[str] = None,
        ttl_seconds: int = 300,
        key_prefix: str = "alma:cache:",
        connection_pool_size: int = 10,
        enable_metrics: bool = True,
        namespace: Optional[str] = None,
    ):
        """
        Initialize Redis cache.

        Args:
            host: Redis host
            port: Redis port
            db: Redis database number
            password: Redis password (optional)
            ttl_seconds: Default TTL for cache entries
            key_prefix: Prefix for all cache keys
            connection_pool_size: Size of connection pool
            enable_metrics: Whether to track performance metrics
            namespace: Optional namespace for cache isolation (default: "alma")
        """
        self.ttl = ttl_seconds
        self.key_prefix = key_prefix
        self.enable_metrics = enable_metrics
        self.namespace = namespace

        # Initialize collision-resistant key generator with namespace
        self._key_generator = CacheKeyGenerator(namespace=namespace)

        self._stats = CacheStats()
        self._metrics = PerformanceMetrics() if enable_metrics else None
        self._lock = threading.RLock()

        # Monitoring hooks
        self._on_hit: Optional[Callable[[str, float], None]] = None
        self._on_miss: Optional[Callable[[str], None]] = None
        self._on_eviction: Optional[Callable[[int], None]] = None

        # Try to import redis
        try:
            import redis

            self._redis = redis.Redis(
                host=host,
                port=port,
                db=db,
                password=password,
                max_connections=connection_pool_size,
                decode_responses=False,  # We handle encoding ourselves
            )
            # Test connection
            self._redis.ping()
            logger.info(f"Connected to Redis at {host}:{port}")
        except ImportError as err:
            raise ImportError(
                "redis package required for RedisCache. Install with: pip install redis"
            ) from err
        except Exception as e:
            raise ConnectionError(f"Failed to connect to Redis: {e}") from e

    def set_hooks(
        self,
        on_hit: Optional[Callable[[str, float], None]] = None,
        on_miss: Optional[Callable[[str], None]] = None,
        on_eviction: Optional[Callable[[int], None]] = None,
    ):
        """Set monitoring hooks for cache events."""
        self._on_hit = on_hit
        self._on_miss = on_miss
        self._on_eviction = on_eviction

    def _generate_key(
        self,
        query: str,
        agent: str,
        project_id: str,
        user_id: Optional[str] = None,
        top_k: int = 5,
    ) -> str:
        """
        Generate a collision-resistant cache key with Redis prefix for pattern matching.

        Uses the CacheKeyGenerator for the hash component, then wraps it with
        Redis-specific prefix structure for pattern-based invalidation.

        Structure: {redis_prefix}{project}:{agent}:{namespace}:{version}:{hash}

        Args:
            query: The search query (will be normalized)
            agent: Agent identifier
            project_id: Project identifier
            user_id: Optional user identifier
            top_k: Number of results requested

        Returns:
            Redis cache key with prefix for pattern matching
        """
        # Generate collision-resistant key
        base_key = self._key_generator.generate(
            query=query,
            agent=agent,
            project_id=project_id,
            user_id=user_id,
            top_k=top_k,
        )

        # Structure: prefix:project:agent:base_key
        # This enables pattern-based invalidation by project or agent
        return f"{self.key_prefix}{project_id}:{agent}:{base_key}"

    def _serialize_result(self, result: MemorySlice) -> bytes:
        """Serialize MemorySlice to bytes."""
        data = {
            "query": result.query,
            "agent": result.agent,
            "retrieval_time_ms": result.retrieval_time_ms,
            "heuristics": [
                {
                    "id": h.id,
                    "agent": h.agent,
                    "project_id": h.project_id,
                    "condition": h.condition,
                    "strategy": h.strategy,
                    "confidence": h.confidence,
                    "occurrence_count": h.occurrence_count,
                    "success_count": h.success_count,
                    "last_validated": (
                        h.last_validated.isoformat() if h.last_validated else None
                    ),
                    "created_at": h.created_at.isoformat() if h.created_at else None,
                }
                for h in result.heuristics
            ],
            "outcomes": [
                {
                    "id": o.id,
                    "agent": o.agent,
                    "project_id": o.project_id,
                    "task_type": o.task_type,
                    "task_description": o.task_description,
                    "success": o.success,
                    "strategy_used": o.strategy_used,
                    "duration_ms": o.duration_ms,
                    "timestamp": o.timestamp.isoformat() if o.timestamp else None,
                }
                for o in result.outcomes
            ],
            "preferences": [
                {
                    "id": p.id,
                    "user_id": p.user_id,
                    "category": p.category,
                    "preference": p.preference,
                    "source": p.source,
                    "confidence": p.confidence,
                }
                for p in result.preferences
            ],
            "domain_knowledge": [
                {
                    "id": dk.id,
                    "agent": dk.agent,
                    "project_id": dk.project_id,
                    "domain": dk.domain,
                    "fact": dk.fact,
                    "source": dk.source,
                    "confidence": dk.confidence,
                }
                for dk in result.domain_knowledge
            ],
            "anti_patterns": [
                {
                    "id": ap.id,
                    "agent": ap.agent,
                    "project_id": ap.project_id,
                    "pattern": ap.pattern,
                    "why_bad": ap.why_bad,
                    "better_alternative": ap.better_alternative,
                    "occurrence_count": ap.occurrence_count,
                }
                for ap in result.anti_patterns
            ],
        }
        return json.dumps(data).encode("utf-8")

    def _deserialize_result(self, data: bytes) -> MemorySlice:
        """Deserialize bytes to MemorySlice."""
        from alma.types import (
            AntiPattern,
            DomainKnowledge,
            Heuristic,
            Outcome,
            UserPreference,
        )

        obj = json.loads(data.decode("utf-8"))

        def parse_datetime(s):
            if s is None:
                return datetime.now(timezone.utc)
            return datetime.fromisoformat(s.replace("Z", "+00:00"))

        heuristics = [
            Heuristic(
                id=h["id"],
                agent=h["agent"],
                project_id=h["project_id"],
                condition=h["condition"],
                strategy=h["strategy"],
                confidence=h["confidence"],
                occurrence_count=h["occurrence_count"],
                success_count=h["success_count"],
                last_validated=parse_datetime(h.get("last_validated")),
                created_at=parse_datetime(h.get("created_at")),
            )
            for h in obj.get("heuristics", [])
        ]

        outcomes = [
            Outcome(
                id=o["id"],
                agent=o["agent"],
                project_id=o["project_id"],
                task_type=o["task_type"],
                task_description=o["task_description"],
                success=o["success"],
                strategy_used=o["strategy_used"],
                duration_ms=o.get("duration_ms"),
                timestamp=parse_datetime(o.get("timestamp")),
            )
            for o in obj.get("outcomes", [])
        ]

        preferences = [
            UserPreference(
                id=p["id"],
                user_id=p["user_id"],
                category=p["category"],
                preference=p["preference"],
                source=p["source"],
                confidence=p.get("confidence", 1.0),
            )
            for p in obj.get("preferences", [])
        ]

        domain_knowledge = [
            DomainKnowledge(
                id=dk["id"],
                agent=dk["agent"],
                project_id=dk["project_id"],
                domain=dk["domain"],
                fact=dk["fact"],
                source=dk["source"],
                confidence=dk.get("confidence", 1.0),
            )
            for dk in obj.get("domain_knowledge", [])
        ]

        anti_patterns = [
            AntiPattern(
                id=ap["id"],
                agent=ap["agent"],
                project_id=ap["project_id"],
                pattern=ap["pattern"],
                why_bad=ap["why_bad"],
                better_alternative=ap["better_alternative"],
                occurrence_count=ap["occurrence_count"],
                last_seen=datetime.now(timezone.utc),
            )
            for ap in obj.get("anti_patterns", [])
        ]

        return MemorySlice(
            heuristics=heuristics,
            outcomes=outcomes,
            preferences=preferences,
            domain_knowledge=domain_knowledge,
            anti_patterns=anti_patterns,
            query=obj.get("query"),
            agent=obj.get("agent"),
            retrieval_time_ms=obj.get("retrieval_time_ms"),
        )
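
[Editorial aside, not part of the package: the two methods above form a JSON round-trip. A sketch of the invariant, assuming a running Redis server and a populated MemorySlice `slice_`; note these are private helpers.]

redis_cache = RedisCache(host="localhost")          # requires a reachable Redis
blob = redis_cache._serialize_result(slice_)        # MemorySlice -> JSON bytes
restored = redis_cache._deserialize_result(blob)    # JSON bytes -> MemorySlice

# Field values survive the round-trip; note that absent timestamps and
# AntiPattern.last_seen are re-filled with "now" rather than preserved.
assert restored.query == slice_.query
assert [h.id for h in restored.heuristics] == [h.id for h in slice_.heuristics]
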
1037
|
+
|
|
1038
|
+
+    def get(
+        self,
+        query: str,
+        agent: str,
+        project_id: str,
+        user_id: Optional[str] = None,
+        top_k: int = 5,
+    ) -> Optional[MemorySlice]:
+        """Get cached result from Redis."""
+        start_time = time.time()
+        key = self._generate_key(query, agent, project_id, user_id, top_k)
+
+        try:
+            data = self._redis.get(key)
+
+            if data is None:
+                with self._lock:
+                    self._stats.misses += 1
+                if self._on_miss:
+                    self._on_miss(key)
+                self._record_get_time(start_time)
+                return None
+
+            result = self._deserialize_result(data)
+            with self._lock:
+                self._stats.hits += 1
+            latency_ms = (time.time() - start_time) * 1000
+            if self._on_hit:
+                self._on_hit(key, latency_ms)
+            self._record_get_time(start_time)
+            logger.debug(f"Redis cache hit for query: {query[:50]}...")
+            return result
+
+        except Exception as e:
+            logger.warning(f"Redis get error: {e}")
+            with self._lock:
+                self._stats.misses += 1
+            self._record_get_time(start_time)
+            return None
+
+    def set(
+        self,
+        query: str,
+        agent: str,
+        project_id: str,
+        result: MemorySlice,
+        user_id: Optional[str] = None,
+        top_k: int = 5,
+        ttl_override: Optional[int] = None,
+    ) -> None:
+        """Cache a retrieval result in Redis."""
+        start_time = time.time()
+        key = self._generate_key(query, agent, project_id, user_id, top_k)
+        ttl = ttl_override or self.ttl
+
+        try:
+            data = self._serialize_result(result)
+            self._redis.setex(key, ttl, data)
+            self._record_set_time(start_time)
+            logger.debug(f"Redis cached result for query: {query[:50]}...")
+
+        except Exception as e:
+            logger.warning(f"Redis set error: {e}")
+            self._record_set_time(start_time)
+
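`get` and `set` support the classic cache-aside flow: look up first, recompute on a miss, then populate. A sketch of a caller under that assumption (`compute` stands in for the real retrieval path, which is not shown in this diff):

```python
def retrieve_with_cache(cache, query: str, agent: str, project_id: str, compute):
    """Cache-aside: try the cache, fall back to `compute`, then populate."""
    result = cache.get(query, agent, project_id, top_k=5)
    if result is not None:
        return result  # hit: served straight from Redis

    result = compute(query, agent, project_id)  # miss: run the expensive path
    cache.set(query, agent, project_id, result, top_k=5)
    return result
```

Because both `get` and `set` swallow Redis errors and fall back to a miss or a no-op, a wrapper like this never has to handle cache outages itself.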
+    def invalidate(
+        self,
+        agent: Optional[str] = None,
+        project_id: Optional[str] = None,
+    ) -> int:
+        """
+        Invalidate cache entries using Redis pattern matching.
+
+        Pattern structure: prefix:project:agent:hash
+        """
+        try:
+            if agent is None and project_id is None:
+                # Clear all ALMA cache keys
+                pattern = f"{self.key_prefix}*"
+            elif project_id and agent:
+                # Specific project and agent
+                pattern = f"{self.key_prefix}{project_id}:{agent}:*"
+            elif project_id:
+                # All agents for a project
+                pattern = f"{self.key_prefix}{project_id}:*"
+            elif agent:
+                # Specific agent across all projects
+                pattern = f"{self.key_prefix}*:{agent}:*"
+            else:
+                return 0
+
+            # Use SCAN for safe iteration over keys
+            count = 0
+            cursor = 0
+            while True:
+                cursor, keys = self._redis.scan(cursor, match=pattern, count=100)
+                if keys:
+                    self._redis.delete(*keys)
+                    count += len(keys)
+                if cursor == 0:
+                    break
+
+            with self._lock:
+                self._stats.evictions += count
+            if self._on_eviction and count > 0:
+                self._on_eviction(count)
+            logger.info(
+                f"Invalidated {count} Redis cache entries for agent={agent}, project={project_id}"
+            )
+            return count
+
+        except Exception as e:
+            logger.warning(f"Redis invalidate error: {e}")
+            return 0
+
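The four branches map onto glob patterns over the `prefix:project:agent:hash` key layout, and SCAN is used instead of KEYS so a large keyspace is walked incrementally rather than blocking Redis. A quick self-check of what each pattern matches, using Python's `fnmatch` as a rough stand-in for Redis MATCH semantics (the sample keys are made up):

```python
from fnmatch import fnmatch

keys = [
    "alma:proj-a:coder:9f2c",
    "alma:proj-a:reviewer:11d0",
    "alma:proj-b:coder:77aa",
]

# project + agent: only proj-a's coder entries
assert [k for k in keys if fnmatch(k, "alma:proj-a:coder:*")] == ["alma:proj-a:coder:9f2c"]
# project only: everything under proj-a
assert sum(fnmatch(k, "alma:proj-a:*") for k in keys) == 2
# agent only: coder entries across all projects
assert sum(fnmatch(k, "alma:*:coder:*") for k in keys) == 2
```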
+    def _record_get_time(self, start_time: float) -> None:
+        """Record get operation timing."""
+        if self._metrics:
+            with self._lock:
+                duration_ms = (time.time() - start_time) * 1000
+                self._metrics.record_get(duration_ms)
+                self._stats.total_get_calls += 1
+
+    def _record_set_time(self, start_time: float) -> None:
+        """Record set operation timing."""
+        if self._metrics:
+            with self._lock:
+                duration_ms = (time.time() - start_time) * 1000
+                self._metrics.record_set(duration_ms)
+                self._stats.total_set_calls += 1
+
+    def get_stats(self) -> CacheStats:
+        """Get cache statistics."""
+        try:
+            # Get current cache size from Redis
+            pattern = f"{self.key_prefix}*"
+            cursor = 0
+            count = 0
+            while True:
+                cursor, keys = self._redis.scan(cursor, match=pattern, count=100)
+                count += len(keys)
+                if cursor == 0:
+                    break
+
+            with self._lock:
+                self._stats.current_size = count
+
+                if self._metrics:
+                    self._stats.avg_get_time_ms = self._metrics.get_avg(
+                        self._metrics.get_times
+                    )
+                    self._stats.avg_set_time_ms = self._metrics.get_avg(
+                        self._metrics.set_times
+                    )
+                    self._stats.p95_get_time_ms = self._metrics.get_percentile(
+                        self._metrics.get_times, 95
+                    )
+                    self._stats.p95_set_time_ms = self._metrics.get_percentile(
+                        self._metrics.set_times, 95
+                    )
+
+            return self._stats
+
+        except Exception as e:
+            logger.warning(f"Redis get_stats error: {e}")
+            return self._stats
+
+    def clear(self) -> None:
+        """Clear all ALMA cache entries from Redis."""
+        try:
+            count = self.invalidate()
+            with self._lock:
+                self._stats = CacheStats()
+                if self._metrics:
+                    self._metrics = PerformanceMetrics()
+            logger.info(f"Cleared Redis cache ({count} entries)")
+        except Exception as e:
+            logger.warning(f"Redis clear error: {e}")
+
+
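A common use of `get_stats` is deriving a hit rate for dashboards. A hypothetical monitoring snippet, assuming `cache` is any cache backend from this module (the `hits`, `misses`, and `current_size` fields all appear in the code above):

```python
stats = cache.get_stats()
total = stats.hits + stats.misses
hit_rate = stats.hits / total if total else 0.0
print(f"hit rate: {hit_rate:.1%}, entries: {stats.current_size}")
```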
+# ==================== NULL CACHE ====================
+
+
+class NullCache(CacheBackend):
+    """
+    A no-op cache implementation for testing or when caching is disabled.
+
+    All operations are valid but don't actually cache anything.
+    """
+
+    def __init__(self):
+        """Initialize null cache."""
+        self._stats = CacheStats()
+
+    def get(self, *args, **kwargs) -> Optional[MemorySlice]:
+        """Always returns None (cache miss)."""
+        self._stats.misses += 1
+        return None
+
+    def set(self, *args, **kwargs) -> None:
+        """No-op."""
+        pass
+
+    def invalidate(self, *args, **kwargs) -> int:
+        """No-op."""
+        return 0
+
+    def get_stats(self) -> CacheStats:
+        """Get cache statistics."""
+        return self._stats
+
+    def clear(self) -> None:
+        """No-op."""
+        pass
+
+
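Because `NullCache` implements the full `CacheBackend` surface, it can be dropped in wherever a cache is expected, which makes test behavior deterministic (every lookup recomputes). A small sketch, assuming `CacheStats` counters start at zero:

```python
cache = NullCache()

assert cache.get("any query", "agent", "proj") is None  # always a miss
cache.set("any query", "agent", "proj", result=None)    # silently dropped
assert cache.get_stats().misses == 1                    # misses are still counted
```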
|
|
1254
|
+
# ==================== CACHE FACTORY ====================
|
|
1255
|
+
|
|
1256
|
+
|
|
1257
|
+
def create_cache(
|
|
1258
|
+
backend: str = "memory",
|
|
1259
|
+
ttl_seconds: int = 300,
|
|
1260
|
+
max_entries: int = 1000,
|
|
1261
|
+
redis_host: str = "localhost",
|
|
1262
|
+
redis_port: int = 6379,
|
|
1263
|
+
redis_password: Optional[str] = None,
|
|
1264
|
+
redis_db: int = 0,
|
|
1265
|
+
enable_metrics: bool = True,
|
|
1266
|
+
namespace: Optional[str] = None,
|
|
1267
|
+
) -> CacheBackend:
|
|
1268
|
+
"""
|
|
1269
|
+
Factory function to create a cache backend.
|
|
1270
|
+
|
|
1271
|
+
Args:
|
|
1272
|
+
backend: "memory", "redis", or "null"
|
|
1273
|
+
ttl_seconds: TTL for cache entries
|
|
1274
|
+
max_entries: Max entries for memory cache
|
|
1275
|
+
redis_host: Redis host (for redis backend)
|
|
1276
|
+
redis_port: Redis port (for redis backend)
|
|
1277
|
+
redis_password: Redis password (for redis backend)
|
|
1278
|
+
redis_db: Redis database number (for redis backend)
|
|
1279
|
+
enable_metrics: Whether to track performance metrics
|
|
1280
|
+
namespace: Optional namespace for cache isolation (default: "alma").
|
|
1281
|
+
Useful for multi-agent or multi-tenant deployments.
|
|
1282
|
+
|
|
1283
|
+
Returns:
|
|
1284
|
+
Configured CacheBackend instance
|
|
1285
|
+
"""
|
|
1286
|
+
if backend == "redis":
|
|
1287
|
+
return RedisCache(
|
|
1288
|
+
host=redis_host,
|
|
1289
|
+
port=redis_port,
|
|
1290
|
+
db=redis_db,
|
|
1291
|
+
password=redis_password,
|
|
1292
|
+
ttl_seconds=ttl_seconds,
|
|
1293
|
+
enable_metrics=enable_metrics,
|
|
1294
|
+
namespace=namespace,
|
|
1295
|
+
)
|
|
1296
|
+
elif backend == "null":
|
|
1297
|
+
return NullCache()
|
|
1298
|
+
else:
|
|
1299
|
+
return RetrievalCache(
|
|
1300
|
+
ttl_seconds=ttl_seconds,
|
|
1301
|
+
max_entries=max_entries,
|
|
1302
|
+
enable_metrics=enable_metrics,
|
|
1303
|
+
namespace=namespace,
|
|
1304
|
+
)
|
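Note that only `"redis"` and `"null"` are matched explicitly; any other `backend` string falls through to the in-memory `RetrievalCache`. A usage sketch (host and namespace values are placeholders):

```python
# Default in-memory cache with a longer TTL and a bigger entry budget.
cache = create_cache(backend="memory", ttl_seconds=600, max_entries=5000)

# Redis-backed cache, namespaced per tenant.
redis_cache = create_cache(
    backend="redis",
    redis_host="redis.internal",  # placeholder host
    redis_port=6379,
    namespace="tenant-42",
)
```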