alma-memory 0.5.1-py3-none-any.whl → 0.7.0-py3-none-any.whl
- alma/__init__.py +296 -226
- alma/compression/__init__.py +33 -0
- alma/compression/pipeline.py +980 -0
- alma/confidence/__init__.py +47 -47
- alma/confidence/engine.py +540 -540
- alma/confidence/types.py +351 -351
- alma/config/loader.py +157 -157
- alma/consolidation/__init__.py +23 -23
- alma/consolidation/engine.py +678 -678
- alma/consolidation/prompts.py +84 -84
- alma/core.py +1189 -430
- alma/domains/__init__.py +30 -30
- alma/domains/factory.py +359 -359
- alma/domains/schemas.py +448 -448
- alma/domains/types.py +272 -272
- alma/events/__init__.py +75 -75
- alma/events/emitter.py +285 -284
- alma/events/storage_mixin.py +246 -246
- alma/events/types.py +126 -126
- alma/events/webhook.py +425 -425
- alma/exceptions.py +49 -49
- alma/extraction/__init__.py +31 -31
- alma/extraction/auto_learner.py +265 -265
- alma/extraction/extractor.py +420 -420
- alma/graph/__init__.py +106 -106
- alma/graph/backends/__init__.py +32 -32
- alma/graph/backends/kuzu.py +624 -624
- alma/graph/backends/memgraph.py +432 -432
- alma/graph/backends/memory.py +236 -236
- alma/graph/backends/neo4j.py +417 -417
- alma/graph/base.py +159 -159
- alma/graph/extraction.py +198 -198
- alma/graph/store.py +860 -860
- alma/harness/__init__.py +35 -35
- alma/harness/base.py +386 -386
- alma/harness/domains.py +705 -705
- alma/initializer/__init__.py +37 -37
- alma/initializer/initializer.py +418 -418
- alma/initializer/types.py +250 -250
- alma/integration/__init__.py +62 -62
- alma/integration/claude_agents.py +444 -444
- alma/integration/helena.py +423 -423
- alma/integration/victor.py +471 -471
- alma/learning/__init__.py +101 -86
- alma/learning/decay.py +878 -0
- alma/learning/forgetting.py +1446 -1446
- alma/learning/heuristic_extractor.py +390 -390
- alma/learning/protocols.py +374 -374
- alma/learning/validation.py +346 -346
- alma/mcp/__init__.py +123 -45
- alma/mcp/__main__.py +156 -156
- alma/mcp/resources.py +122 -122
- alma/mcp/server.py +955 -591
- alma/mcp/tools.py +3254 -509
- alma/observability/__init__.py +91 -84
- alma/observability/config.py +302 -302
- alma/observability/guidelines.py +170 -0
- alma/observability/logging.py +424 -424
- alma/observability/metrics.py +583 -583
- alma/observability/tracing.py +440 -440
- alma/progress/__init__.py +21 -21
- alma/progress/tracker.py +607 -607
- alma/progress/types.py +250 -250
- alma/retrieval/__init__.py +134 -53
- alma/retrieval/budget.py +525 -0
- alma/retrieval/cache.py +1304 -1061
- alma/retrieval/embeddings.py +202 -202
- alma/retrieval/engine.py +850 -427
- alma/retrieval/modes.py +365 -0
- alma/retrieval/progressive.py +560 -0
- alma/retrieval/scoring.py +344 -344
- alma/retrieval/trust_scoring.py +637 -0
- alma/retrieval/verification.py +797 -0
- alma/session/__init__.py +19 -19
- alma/session/manager.py +442 -399
- alma/session/types.py +288 -288
- alma/storage/__init__.py +101 -90
- alma/storage/archive.py +233 -0
- alma/storage/azure_cosmos.py +1259 -1259
- alma/storage/base.py +1083 -583
- alma/storage/chroma.py +1443 -1443
- alma/storage/constants.py +103 -103
- alma/storage/file_based.py +614 -614
- alma/storage/migrations/__init__.py +21 -21
- alma/storage/migrations/base.py +321 -321
- alma/storage/migrations/runner.py +323 -323
- alma/storage/migrations/version_stores.py +337 -337
- alma/storage/migrations/versions/__init__.py +11 -11
- alma/storage/migrations/versions/v1_0_0.py +373 -373
- alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
- alma/storage/pinecone.py +1080 -1080
- alma/storage/postgresql.py +1948 -1559
- alma/storage/qdrant.py +1306 -1306
- alma/storage/sqlite_local.py +3041 -1457
- alma/testing/__init__.py +46 -46
- alma/testing/factories.py +301 -301
- alma/testing/mocks.py +389 -389
- alma/types.py +292 -264
- alma/utils/__init__.py +19 -0
- alma/utils/tokenizer.py +521 -0
- alma/workflow/__init__.py +83 -0
- alma/workflow/artifacts.py +170 -0
- alma/workflow/checkpoint.py +311 -0
- alma/workflow/context.py +228 -0
- alma/workflow/outcomes.py +189 -0
- alma/workflow/reducers.py +393 -0
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/METADATA +210 -72
- alma_memory-0.7.0.dist-info/RECORD +112 -0
- alma_memory-0.5.1.dist-info/RECORD +0 -93
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/retrieval/engine.py
CHANGED
@@ -1,427 +1,850 @@

The file was rewritten in full (427 → 850 lines); the new version:

```python
"""
ALMA Retrieval Engine.

Handles semantic search and memory retrieval with scoring and caching.
Supports mode-aware retrieval for different cognitive tasks.
"""

import logging
import time
from typing import Any, Dict, List, Optional, Tuple

from alma.observability.logging import get_logger
from alma.observability.metrics import get_metrics
from alma.observability.tracing import get_tracer
from alma.retrieval.cache import NullCache, RetrievalCache
from alma.retrieval.modes import (
    RetrievalMode,
    get_mode_config,
    get_mode_reason,
    infer_mode_from_query,
)
from alma.retrieval.scoring import MemoryScorer, ScoredItem, ScoringWeights
from alma.storage.base import StorageBackend
from alma.types import MemoryScope, MemorySlice

logger = logging.getLogger(__name__)
structured_logger = get_logger(__name__)
tracer = get_tracer(__name__)


class RetrievalEngine:
    """
    Retrieves relevant memories for task context injection.

    Features:
    - Semantic search via embeddings
    - Recency weighting (newer memories preferred)
    - Success rate weighting (proven strategies ranked higher)
    - Caching for repeated queries
    - Configurable scoring weights
    """

    def __init__(
        self,
        storage: StorageBackend,
        embedding_provider: str = "local",
        cache_ttl_seconds: int = 300,
        enable_cache: bool = True,
        max_cache_entries: int = 1000,
        scoring_weights: Optional[ScoringWeights] = None,
        recency_half_life_days: float = 30.0,
        min_score_threshold: float = 0.2,
    ):
        """
        Initialize retrieval engine.

        Args:
            storage: Storage backend to query
            embedding_provider: "local" (sentence-transformers) or "azure" (Azure OpenAI)
            cache_ttl_seconds: How long to cache query results
            enable_cache: Whether to enable caching
            max_cache_entries: Maximum cache entries before eviction
            scoring_weights: Custom weights for similarity/recency/success/confidence
            recency_half_life_days: Days after which recency score halves
            min_score_threshold: Minimum score to include in results
        """
        self.storage = storage
        self.embedding_provider = embedding_provider
        self.min_score_threshold = min_score_threshold
        self._embedder = None

        # Initialize scorer
        self.scorer = MemoryScorer(
            weights=scoring_weights or ScoringWeights(),
            recency_half_life_days=recency_half_life_days,
        )

        # Initialize cache
        if enable_cache:
            self.cache = RetrievalCache(
                ttl_seconds=cache_ttl_seconds,
                max_entries=max_cache_entries,
            )
        else:
            self.cache = NullCache()

    def retrieve(
        self,
        query: str,
        agent: str,
        project_id: str,
        user_id: Optional[str] = None,
        top_k: int = 5,
        scope: Optional[MemoryScope] = None,
        bypass_cache: bool = False,
        include_shared: bool = True,
    ) -> MemorySlice:
        """
        Retrieve relevant memories for a task.

        Supports multi-agent memory sharing: if a scope is provided with
        inherit_from agents, memories from those agents will also be included.
        Shared memories have their origin tracked in the metadata['shared_from'] field.

        Args:
            query: Task description to find relevant memories for
            agent: Agent requesting memories
            project_id: Project context
            user_id: Optional user for preference retrieval
            top_k: Max items per memory type
            scope: Agent's learning scope for filtering (enables multi-agent sharing)
            bypass_cache: Skip cache lookup/storage
            include_shared: If True and scope has inherit_from, include shared memories

        Returns:
            MemorySlice with relevant memories, scored and ranked
        """
        start_time = time.time()

        # Check cache first
        if not bypass_cache:
            cached = self.cache.get(query, agent, project_id, user_id, top_k)
            if cached is not None:
                cached.retrieval_time_ms = int((time.time() - start_time) * 1000)
                logger.debug(f"Cache hit for query: {query[:50]}...")
                return cached

        # Generate embedding for query
        query_embedding = self._get_embedding(query)

        # Determine which agents to query based on scope
        agents_to_query = [agent]
        if include_shared and scope and scope.inherit_from:
            agents_to_query = scope.get_readable_agents()
            logger.debug(
                f"Multi-agent retrieval for {agent}: querying {agents_to_query}"
            )

        # Retrieve raw items from storage (with vector search)
        if len(agents_to_query) > 1:
            # Use multi-agent query methods
            raw_heuristics = self.storage.get_heuristics_for_agents(
                project_id=project_id,
                agents=agents_to_query,
                embedding=query_embedding,
                top_k=top_k * 2,
                min_confidence=0.0,
            )
            raw_outcomes = self.storage.get_outcomes_for_agents(
                project_id=project_id,
                agents=agents_to_query,
                embedding=query_embedding,
                top_k=top_k * 2,
                success_only=False,
            )
            raw_domain_knowledge = self.storage.get_domain_knowledge_for_agents(
                project_id=project_id,
                agents=agents_to_query,
                embedding=query_embedding,
                top_k=top_k * 2,
            )
            raw_anti_patterns = self.storage.get_anti_patterns_for_agents(
                project_id=project_id,
                agents=agents_to_query,
                embedding=query_embedding,
                top_k=top_k * 2,
            )

            # Mark shared memories with origin tracking
            raw_heuristics = self._mark_shared_memories(raw_heuristics, agent)
            raw_outcomes = self._mark_shared_memories(raw_outcomes, agent)
            raw_domain_knowledge = self._mark_shared_memories(
                raw_domain_knowledge, agent
            )
            raw_anti_patterns = self._mark_shared_memories(raw_anti_patterns, agent)
        else:
            # Single agent query (original behavior)
            raw_heuristics = self.storage.get_heuristics(
                project_id=project_id,
                agent=agent,
                embedding=query_embedding,
                top_k=top_k * 2,
                min_confidence=0.0,
            )
            raw_outcomes = self.storage.get_outcomes(
                project_id=project_id,
                agent=agent,
                embedding=query_embedding,
                top_k=top_k * 2,
                success_only=False,
            )
            raw_domain_knowledge = self.storage.get_domain_knowledge(
                project_id=project_id,
                agent=agent,
                embedding=query_embedding,
                top_k=top_k * 2,
            )
            raw_anti_patterns = self.storage.get_anti_patterns(
                project_id=project_id,
                agent=agent,
                embedding=query_embedding,
                top_k=top_k * 2,
            )

        # Score and rank each type
        scored_heuristics = self.scorer.score_heuristics(raw_heuristics)
        scored_outcomes = self.scorer.score_outcomes(raw_outcomes)
        scored_knowledge = self.scorer.score_domain_knowledge(raw_domain_knowledge)
        scored_anti_patterns = self.scorer.score_anti_patterns(raw_anti_patterns)

        # Apply threshold and limit
        final_heuristics = self._extract_top_k(scored_heuristics, top_k)
        final_outcomes = self._extract_top_k(scored_outcomes, top_k)
        final_knowledge = self._extract_top_k(scored_knowledge, top_k)
        final_anti_patterns = self._extract_top_k(scored_anti_patterns, top_k)

        # Get user preferences (not scored, just retrieved)
        preferences = []
        if user_id:
            preferences = self.storage.get_user_preferences(user_id=user_id)

        retrieval_time_ms = int((time.time() - start_time) * 1000)

        result = MemorySlice(
            heuristics=final_heuristics,
            outcomes=final_outcomes,
            preferences=preferences,
            domain_knowledge=final_knowledge,
            anti_patterns=final_anti_patterns,
            query=query,
            agent=agent,
            retrieval_time_ms=retrieval_time_ms,
        )

        # Cache result
        if not bypass_cache:
            self.cache.set(query, agent, project_id, result, user_id, top_k)

        logger.info(
            f"Retrieved {result.total_items} memories for '{query[:50]}...' "
            f"in {retrieval_time_ms}ms"
        )

        return result

    def retrieve_with_mode(
        self,
        query: str,
        agent: str,
        project_id: str,
        mode: Optional[RetrievalMode] = None,
        user_id: Optional[str] = None,
        top_k: Optional[int] = None,
        min_confidence: Optional[float] = None,
        scope: Optional[MemoryScope] = None,
        bypass_cache: bool = False,
        include_shared: bool = True,
    ) -> Tuple[MemorySlice, RetrievalMode, str]:
        """
        Retrieve memories using mode-aware strategy.

        Different cognitive tasks require different retrieval approaches:
        - BROAD: Planning, brainstorming - diverse, exploratory results
        - PRECISE: Execution, implementation - high-confidence matches
        - DIAGNOSTIC: Debugging, troubleshooting - anti-patterns and failures
        - LEARNING: Pattern finding - similar memories for consolidation
        - RECALL: Exact lookup - prioritizes exact matches

        Args:
            query: Task description to find relevant memories for
            agent: Agent requesting memories
            project_id: Project context
            mode: Retrieval mode (auto-inferred if None)
            user_id: Optional user for preference retrieval
            top_k: Override mode's default top_k
            min_confidence: Override mode's default min_confidence
            scope: Agent's learning scope for filtering
            bypass_cache: Skip cache lookup/storage
            include_shared: Include memories from inherit_from agents

        Returns:
            Tuple of (MemorySlice, detected_mode, mode_reason)
        """
        start_time = time.time()

        # Auto-infer mode if not specified
        if mode is None:
            mode = infer_mode_from_query(query)

        mode_reason = get_mode_reason(query, mode)
        config = get_mode_config(mode)

        # Apply overrides
        effective_top_k = top_k if top_k is not None else config.top_k
        effective_min_confidence = (
            min_confidence if min_confidence is not None else config.min_confidence
        )

        # Store original settings
        original_weights = self.scorer.weights
        original_threshold = self.min_score_threshold

        try:
            # Apply mode-specific scoring weights
            if config.weights:
                self.scorer.weights = ScoringWeights(
                    similarity=config.weights.get("similarity", 0.4),
                    recency=config.weights.get("recency", 0.3),
                    success_rate=config.weights.get("success_rate", 0.2),
                    confidence=config.weights.get("confidence", 0.1),
                )
            self.min_score_threshold = effective_min_confidence

            # Get extra candidates for diversity filtering
            retrieval_k = effective_top_k
            if config.diversity_factor > 0:
                retrieval_k = effective_top_k * 3  # Get more for filtering

            # Check cache (with mode in key would be ideal, but use bypass for now)
            # Mode-aware caching could be added in future
            if not bypass_cache:
                cache_key_suffix = f"_mode_{mode.value}"
                cached = self.cache.get(
                    query + cache_key_suffix,
                    agent,
                    project_id,
                    user_id,
                    effective_top_k,
                )
                if cached is not None:
                    cached.retrieval_time_ms = int((time.time() - start_time) * 1000)
                    logger.debug(f"Cache hit for mode-aware query: {query[:50]}...")
                    return cached, mode, mode_reason

            # Generate embedding
            query_embedding = self._get_embedding(query)

            # Determine agents to query
            agents_to_query = [agent]
            if include_shared and scope and scope.inherit_from:
                agents_to_query = scope.get_readable_agents()

            # Retrieve raw items
            if len(agents_to_query) > 1:
                raw_heuristics = self.storage.get_heuristics_for_agents(
                    project_id=project_id,
                    agents=agents_to_query,
                    embedding=query_embedding,
                    top_k=retrieval_k * 2,
                    min_confidence=0.0,
                )
                raw_outcomes = self.storage.get_outcomes_for_agents(
                    project_id=project_id,
                    agents=agents_to_query,
                    embedding=query_embedding,
                    top_k=retrieval_k * 2,
                    success_only=False,
                )
                raw_domain_knowledge = self.storage.get_domain_knowledge_for_agents(
                    project_id=project_id,
                    agents=agents_to_query,
                    embedding=query_embedding,
                    top_k=retrieval_k * 2,
                )
                raw_anti_patterns = []
                if config.include_anti_patterns:
                    raw_anti_patterns = self.storage.get_anti_patterns_for_agents(
                        project_id=project_id,
                        agents=agents_to_query,
                        embedding=query_embedding,
                        top_k=retrieval_k * 2,
                    )

                # Mark shared memories
                raw_heuristics = self._mark_shared_memories(raw_heuristics, agent)
                raw_outcomes = self._mark_shared_memories(raw_outcomes, agent)
                raw_domain_knowledge = self._mark_shared_memories(
                    raw_domain_knowledge, agent
                )
                raw_anti_patterns = self._mark_shared_memories(raw_anti_patterns, agent)
            else:
                raw_heuristics = self.storage.get_heuristics(
                    project_id=project_id,
                    agent=agent,
                    embedding=query_embedding,
                    top_k=retrieval_k * 2,
                    min_confidence=0.0,
                )
                raw_outcomes = self.storage.get_outcomes(
                    project_id=project_id,
                    agent=agent,
                    embedding=query_embedding,
                    top_k=retrieval_k * 2,
                    success_only=False,
                )
                raw_domain_knowledge = self.storage.get_domain_knowledge(
                    project_id=project_id,
                    agent=agent,
                    embedding=query_embedding,
                    top_k=retrieval_k * 2,
                )
                raw_anti_patterns = []
                if config.include_anti_patterns:
                    raw_anti_patterns = self.storage.get_anti_patterns(
                        project_id=project_id,
                        agent=agent,
                        embedding=query_embedding,
                        top_k=retrieval_k * 2,
                    )

            # Score items
            scored_heuristics = self.scorer.score_heuristics(raw_heuristics)
            scored_outcomes = self.scorer.score_outcomes(raw_outcomes)
            scored_knowledge = self.scorer.score_domain_knowledge(raw_domain_knowledge)
            scored_anti_patterns = self.scorer.score_anti_patterns(raw_anti_patterns)

            # Apply mode-specific processing
            if config.prioritize_failures:
                scored_outcomes = self._boost_failures(scored_outcomes)

            if config.exact_match_boost > 1.0:
                scored_heuristics = self._apply_exact_match_boost(
                    scored_heuristics, config.exact_match_boost
                )
                scored_outcomes = self._apply_exact_match_boost(
                    scored_outcomes, config.exact_match_boost
                )
                scored_knowledge = self._apply_exact_match_boost(
                    scored_knowledge, config.exact_match_boost
                )

            # Apply diversity filtering if enabled
            if config.diversity_factor > 0:
                scored_heuristics = self._diversify_results(
                    scored_heuristics, config.diversity_factor
                )
                scored_outcomes = self._diversify_results(
                    scored_outcomes, config.diversity_factor
                )
                scored_knowledge = self._diversify_results(
                    scored_knowledge, config.diversity_factor
                )
                scored_anti_patterns = self._diversify_results(
                    scored_anti_patterns, config.diversity_factor
                )

            # Extract top-k with threshold
            final_heuristics = self._extract_top_k(scored_heuristics, effective_top_k)
            final_outcomes = self._extract_top_k(scored_outcomes, effective_top_k)
            final_knowledge = self._extract_top_k(scored_knowledge, effective_top_k)
            final_anti_patterns = self._extract_top_k(
                scored_anti_patterns, effective_top_k
            )

            # Get user preferences
            preferences = []
            if user_id:
                preferences = self.storage.get_user_preferences(user_id=user_id)

            retrieval_time_ms = int((time.time() - start_time) * 1000)

            result = MemorySlice(
                heuristics=final_heuristics,
                outcomes=final_outcomes,
                preferences=preferences,
                domain_knowledge=final_knowledge,
                anti_patterns=final_anti_patterns,
                query=query,
                agent=agent,
                retrieval_time_ms=retrieval_time_ms,
            )

            # Cache result
            if not bypass_cache:
                cache_key_suffix = f"_mode_{mode.value}"
                self.cache.set(
                    query + cache_key_suffix,
                    agent,
                    project_id,
                    result,
                    user_id,
                    effective_top_k,
                )

            logger.info(
                f"Mode-aware retrieval ({mode.value}): {result.total_items} memories "
                f"for '{query[:50]}...' in {retrieval_time_ms}ms"
            )

            return result, mode, mode_reason

        finally:
            # Restore original settings
            self.scorer.weights = original_weights
            self.min_score_threshold = original_threshold

    def _diversify_results(
        self,
        scored_items: List[ScoredItem],
        diversity_factor: float,
    ) -> List[ScoredItem]:
        """
        Apply MMR-style diversity filtering to reduce redundancy.

        Maximal Marginal Relevance balances relevance with diversity
        by penalizing items too similar to already-selected items.

        Args:
            scored_items: Scored and sorted items
            diversity_factor: 0.0 = pure relevance, 1.0 = max diversity

        Returns:
            Reordered list with diversity applied
        """
        if not scored_items or len(scored_items) <= 1 or diversity_factor == 0:
            return scored_items

        # Start with highest-scored item
        selected = [scored_items[0]]
        remaining = list(scored_items[1:])

        while remaining:
            best_idx = 0
            best_mmr_score = float("-inf")

            for i, candidate in enumerate(remaining):
                # Find max similarity to any selected item
                max_sim_to_selected = 0.0
                for selected_item in selected:
                    # Use similarity scores as proxy for semantic similarity
                    # In a full implementation, we'd recompute embeddings
                    sim = self._estimate_similarity(candidate, selected_item)
                    max_sim_to_selected = max(max_sim_to_selected, sim)

                # MMR score: relevance - diversity_factor * max_similarity
                mmr_score = candidate.score - (diversity_factor * max_sim_to_selected)

                if mmr_score > best_mmr_score:
                    best_mmr_score = mmr_score
                    best_idx = i

            selected.append(remaining.pop(best_idx))

        return selected

    def _estimate_similarity(
        self,
        item1: ScoredItem,
        item2: ScoredItem,
    ) -> float:
        """
        Estimate semantic similarity between two items.

        Uses similarity scores as a proxy. For more accurate results,
        would need to recompute embeddings (expensive).

        Args:
            item1: First scored item
            item2: Second scored item

        Returns:
            Estimated similarity (0-1)
        """
        # Use the geometric mean of similarity scores as a rough estimate
        # Items with similar relevance to query are likely similar to each other
        sim1 = item1.similarity_score
        sim2 = item2.similarity_score

        # If both are highly similar to query, they're likely similar to each other
        # This is a heuristic - proper MMR would use actual pairwise similarity
        if sim1 > 0 and sim2 > 0:
            return (sim1 * sim2) ** 0.5
        return 0.0

    def _boost_failures(
        self,
        scored_outcomes: List[ScoredItem],
    ) -> List[ScoredItem]:
        """
        Boost failed outcomes for diagnostic mode.

        In debugging contexts, failures are valuable learning opportunities.

        Args:
            scored_outcomes: Scored outcomes

        Returns:
            Outcomes with failures boosted
        """
        boosted = []
        for item in scored_outcomes:
            outcome = item.item
            # Check if this is a failed outcome
            if hasattr(outcome, "success") and not outcome.success:
                # Boost the score by 50%
                boosted.append(
                    ScoredItem(
                        item=item.item,
                        score=item.score * 1.5,
                        similarity_score=item.similarity_score,
                        recency_score=item.recency_score,
                        success_score=item.success_score,
                        confidence_score=item.confidence_score,
                    )
                )
            else:
                boosted.append(item)

        # Re-sort after boosting
        return sorted(boosted, key=lambda x: -x.score)

    def _apply_exact_match_boost(
        self,
        scored_items: List[ScoredItem],
        boost_factor: float,
    ) -> List[ScoredItem]:
        """
        Boost items with very high similarity scores.

        For PRECISE and RECALL modes, we want to strongly prefer
        near-exact matches.

        Args:
            scored_items: Scored items
            boost_factor: Multiplier for high-similarity items

        Returns:
            Items with exact matches boosted
        """
        boosted = []
        for item in scored_items:
            # Boost items with >0.9 similarity
            if item.similarity_score > 0.9:
                boosted.append(
                    ScoredItem(
                        item=item.item,
                        score=item.score * boost_factor,
                        similarity_score=item.similarity_score,
                        recency_score=item.recency_score,
                        success_score=item.success_score,
                        confidence_score=item.confidence_score,
                    )
                )
            # Smaller boost for >0.8 similarity
            elif item.similarity_score > 0.8:
                boosted.append(
                    ScoredItem(
                        item=item.item,
                        score=item.score * (1 + (boost_factor - 1) / 2),
                        similarity_score=item.similarity_score,
                        recency_score=item.recency_score,
                        success_score=item.success_score,
                        confidence_score=item.confidence_score,
                    )
                )
            else:
                boosted.append(item)

        # Re-sort after boosting
        return sorted(boosted, key=lambda x: -x.score)

    def _mark_shared_memories(
        self,
        memories: List[Any],
        requesting_agent: str,
    ) -> List[Any]:
        """
        Mark memories that came from other agents with their origin.

        Adds 'shared_from' to metadata for memories not owned by requesting_agent.
        This maintains write isolation - only the owning agent can modify their memories.

        Args:
            memories: List of memory objects (Heuristic, Outcome, etc.)
            requesting_agent: The agent that requested the memories

        Returns:
            Same memories with shared_from metadata added where applicable
        """
        for memory in memories:
            if hasattr(memory, "agent") and memory.agent != requesting_agent:
                if not hasattr(memory, "metadata") or memory.metadata is None:
                    memory.metadata = {}
                memory.metadata["shared_from"] = memory.agent
        return memories

    def _extract_top_k(
        self,
        scored_items: List[ScoredItem],
        top_k: int,
    ) -> List[Any]:
        """
        Extract top-k items after filtering by score threshold.

        Args:
            scored_items: Scored and sorted items
            top_k: Maximum number to return

        Returns:
            List of original items (unwrapped from ScoredItem)
        """
        filtered = self.scorer.apply_score_threshold(
            scored_items, self.min_score_threshold
        )
        return [item.item for item in filtered[:top_k]]

    def _get_embedding(self, text: str) -> List[float]:
        """
        Generate embedding for text.

        Uses lazy initialization of embedding model.
        """
        if self._embedder is None:
            self._embedder = self._init_embedder()

        start_time = time.time()
        embedding = self._embedder.encode(text)
        duration_ms = (time.time() - start_time) * 1000

        # Record embedding generation metrics
        metrics = get_metrics()
        metrics.record_embedding_latency(
            duration_ms=duration_ms,
            provider=self.embedding_provider,
            batch_size=1,
        )

        return embedding

    def _init_embedder(self):
        """Initialize the embedding model based on provider config."""
        if self.embedding_provider == "azure":
            from alma.retrieval.embeddings import AzureEmbedder

            embedder = AzureEmbedder()
        elif self.embedding_provider == "mock":
            from alma.retrieval.embeddings import MockEmbedder

            embedder = MockEmbedder()
        else:
            from alma.retrieval.embeddings import LocalEmbedder

            embedder = LocalEmbedder()

        # Validate embedding dimension matches storage configuration
        self._validate_embedding_dimension(embedder)
        return embedder

    def _validate_embedding_dimension(self, embedder) -> None:
        """
        Validate that embedding provider dimension matches storage configuration.

        Raises:
            ValueError: If dimensions don't match
        """
        provider_dim = embedder.dimension

        # Check if storage has embedding_dim attribute
        storage_dim = getattr(self.storage, "embedding_dim", None)
        if storage_dim is None:
            logger.debug(
                "Storage backend doesn't specify embedding_dim, skipping validation"
            )
            return

        # Skip validation if storage_dim is not an integer (e.g., mock objects)
        if not isinstance(storage_dim, int):
            logger.debug(
                f"Storage embedding_dim is not an integer ({type(storage_dim)}), "
                "skipping validation"
            )
            return

        if provider_dim != storage_dim:
            raise ValueError(
                f"Embedding dimension mismatch: provider '{self.embedding_provider}' "
                f"outputs {provider_dim} dimensions, but storage is configured for "
                f"{storage_dim} dimensions. Update your config's embedding_dim to "
                f"match the provider, or use a different embedding provider.\n"
                f"  - local (all-MiniLM-L6-v2): 384 dimensions\n"
                f"  - azure (text-embedding-3-small): 1536 dimensions"
            )

        logger.info(
            f"Embedding dimension validated: {provider_dim} "
            f"(provider: {self.embedding_provider})"
        )

    def invalidate_cache(
        self,
        agent: Optional[str] = None,
        project_id: Optional[str] = None,
    ):
        """
        Invalidate cache entries.

        Should be called after memory updates to ensure fresh results.

        Args:
            agent: Invalidate entries for this agent
            project_id: Invalidate entries for this project
        """
        self.cache.invalidate(agent=agent, project_id=project_id)

    def get_cache_stats(self) -> Dict[str, Any]:
        """Get cache performance statistics."""
        stats = self.cache.get_stats()
        return stats.to_dict()

    def clear_cache(self):
        """Clear all cached results."""
        self.cache.clear()

    def get_scorer_weights(self) -> Dict[str, float]:
        """Get current scoring weights."""
        w = self.scorer.weights
        return {
            "similarity": w.similarity,
            "recency": w.recency,
            "success_rate": w.success_rate,
            "confidence": w.confidence,
        }

    def update_scorer_weights(
        self,
        similarity: Optional[float] = None,
        recency: Optional[float] = None,
        success_rate: Optional[float] = None,
        confidence: Optional[float] = None,
    ):
        """
        Update scoring weights (will be normalized to sum to 1.0).

        Args:
            similarity: Weight for semantic similarity
            recency: Weight for recency
            success_rate: Weight for success rate
            confidence: Weight for stored confidence
        """
        current = self.scorer.weights
        self.scorer.weights = ScoringWeights(
            similarity=similarity if similarity is not None else current.similarity,
            recency=recency if recency is not None else current.recency,
            success_rate=(
                success_rate if success_rate is not None else current.success_rate
            ),
            confidence=confidence if confidence is not None else current.confidence,
        )
        # Clear cache since scoring changed
        self.cache.clear()
```