alma_memory-0.5.1-py3-none-any.whl → alma_memory-0.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alma/__init__.py +296 -226
- alma/compression/__init__.py +33 -0
- alma/compression/pipeline.py +980 -0
- alma/confidence/__init__.py +47 -47
- alma/confidence/engine.py +540 -540
- alma/confidence/types.py +351 -351
- alma/config/loader.py +157 -157
- alma/consolidation/__init__.py +23 -23
- alma/consolidation/engine.py +678 -678
- alma/consolidation/prompts.py +84 -84
- alma/core.py +1189 -430
- alma/domains/__init__.py +30 -30
- alma/domains/factory.py +359 -359
- alma/domains/schemas.py +448 -448
- alma/domains/types.py +272 -272
- alma/events/__init__.py +75 -75
- alma/events/emitter.py +285 -284
- alma/events/storage_mixin.py +246 -246
- alma/events/types.py +126 -126
- alma/events/webhook.py +425 -425
- alma/exceptions.py +49 -49
- alma/extraction/__init__.py +31 -31
- alma/extraction/auto_learner.py +265 -265
- alma/extraction/extractor.py +420 -420
- alma/graph/__init__.py +106 -106
- alma/graph/backends/__init__.py +32 -32
- alma/graph/backends/kuzu.py +624 -624
- alma/graph/backends/memgraph.py +432 -432
- alma/graph/backends/memory.py +236 -236
- alma/graph/backends/neo4j.py +417 -417
- alma/graph/base.py +159 -159
- alma/graph/extraction.py +198 -198
- alma/graph/store.py +860 -860
- alma/harness/__init__.py +35 -35
- alma/harness/base.py +386 -386
- alma/harness/domains.py +705 -705
- alma/initializer/__init__.py +37 -37
- alma/initializer/initializer.py +418 -418
- alma/initializer/types.py +250 -250
- alma/integration/__init__.py +62 -62
- alma/integration/claude_agents.py +444 -444
- alma/integration/helena.py +423 -423
- alma/integration/victor.py +471 -471
- alma/learning/__init__.py +101 -86
- alma/learning/decay.py +878 -0
- alma/learning/forgetting.py +1446 -1446
- alma/learning/heuristic_extractor.py +390 -390
- alma/learning/protocols.py +374 -374
- alma/learning/validation.py +346 -346
- alma/mcp/__init__.py +123 -45
- alma/mcp/__main__.py +156 -156
- alma/mcp/resources.py +122 -122
- alma/mcp/server.py +955 -591
- alma/mcp/tools.py +3254 -509
- alma/observability/__init__.py +91 -84
- alma/observability/config.py +302 -302
- alma/observability/guidelines.py +170 -0
- alma/observability/logging.py +424 -424
- alma/observability/metrics.py +583 -583
- alma/observability/tracing.py +440 -440
- alma/progress/__init__.py +21 -21
- alma/progress/tracker.py +607 -607
- alma/progress/types.py +250 -250
- alma/retrieval/__init__.py +134 -53
- alma/retrieval/budget.py +525 -0
- alma/retrieval/cache.py +1304 -1061
- alma/retrieval/embeddings.py +202 -202
- alma/retrieval/engine.py +850 -427
- alma/retrieval/modes.py +365 -0
- alma/retrieval/progressive.py +560 -0
- alma/retrieval/scoring.py +344 -344
- alma/retrieval/trust_scoring.py +637 -0
- alma/retrieval/verification.py +797 -0
- alma/session/__init__.py +19 -19
- alma/session/manager.py +442 -399
- alma/session/types.py +288 -288
- alma/storage/__init__.py +101 -90
- alma/storage/archive.py +233 -0
- alma/storage/azure_cosmos.py +1259 -1259
- alma/storage/base.py +1083 -583
- alma/storage/chroma.py +1443 -1443
- alma/storage/constants.py +103 -103
- alma/storage/file_based.py +614 -614
- alma/storage/migrations/__init__.py +21 -21
- alma/storage/migrations/base.py +321 -321
- alma/storage/migrations/runner.py +323 -323
- alma/storage/migrations/version_stores.py +337 -337
- alma/storage/migrations/versions/__init__.py +11 -11
- alma/storage/migrations/versions/v1_0_0.py +373 -373
- alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
- alma/storage/pinecone.py +1080 -1080
- alma/storage/postgresql.py +1948 -1559
- alma/storage/qdrant.py +1306 -1306
- alma/storage/sqlite_local.py +3041 -1457
- alma/testing/__init__.py +46 -46
- alma/testing/factories.py +301 -301
- alma/testing/mocks.py +389 -389
- alma/types.py +292 -264
- alma/utils/__init__.py +19 -0
- alma/utils/tokenizer.py +521 -0
- alma/workflow/__init__.py +83 -0
- alma/workflow/artifacts.py +170 -0
- alma/workflow/checkpoint.py +311 -0
- alma/workflow/context.py +228 -0
- alma/workflow/outcomes.py +189 -0
- alma/workflow/reducers.py +393 -0
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/METADATA +210 -72
- alma_memory-0.7.0.dist-info/RECORD +112 -0
- alma_memory-0.5.1.dist-info/RECORD +0 -93
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
- {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/storage/azure_cosmos.py
CHANGED
@@ -1,1259 +1,1259 @@
"""
ALMA Azure Cosmos DB Storage Backend.

Production storage using Azure Cosmos DB with vector search capabilities.
Uses Azure Key Vault for secrets management.

Requirements:
    pip install azure-cosmos azure-identity azure-keyvault-secrets

Configuration (config.yaml):
    alma:
      storage: azure
      azure:
        endpoint: ${AZURE_COSMOS_ENDPOINT}
        key: ${KEYVAULT:cosmos-db-key}
        database: alma-memory
        embedding_dim: 384
"""

import logging
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

from alma.storage.base import StorageBackend
from alma.storage.constants import AZURE_COSMOS_CONTAINER_NAMES, MemoryType
from alma.types import (
    AntiPattern,
    DomainKnowledge,
    Heuristic,
    Outcome,
    UserPreference,
)

logger = logging.getLogger(__name__)

# Try to import Azure SDK
try:
    from azure.cosmos import CosmosClient, PartitionKey, exceptions
    from azure.cosmos.container import ContainerProxy
    from azure.cosmos.database import DatabaseProxy

    AZURE_COSMOS_AVAILABLE = True
except ImportError:
    AZURE_COSMOS_AVAILABLE = False
    # Define placeholders for type hints when SDK not available
    CosmosClient = None  # type: ignore
    PartitionKey = None  # type: ignore
    exceptions = None  # type: ignore
    ContainerProxy = Any  # type: ignore
    DatabaseProxy = Any  # type: ignore
    logger.warning(
        "azure-cosmos package not installed. Install with: pip install azure-cosmos"
    )
class AzureCosmosStorage(StorageBackend):
    """
    Azure Cosmos DB storage backend with vector search.

    Uses:
    - NoSQL API for document storage
    - DiskANN vector indexing for similarity search
    - Partition key: project_id for efficient queries

    Container structure (uses canonical memory type names with alma_ prefix):
    - alma_heuristics: Heuristics with vector embeddings
    - alma_outcomes: Task outcomes with vector embeddings
    - alma_preferences: User preferences (no vectors)
    - alma_domain_knowledge: Domain knowledge with vector embeddings
    - alma_anti_patterns: Anti-patterns with vector embeddings

    Container names are derived from alma.storage.constants.AZURE_COSMOS_CONTAINER_NAMES
    for consistency across all storage backends.
    """

    # Use canonical container names from constants
    CONTAINER_NAMES = AZURE_COSMOS_CONTAINER_NAMES
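The mapping itself lives in alma.storage.constants and is not shown in this diff; judging from the container list in the docstring and the MemoryType keys used below, it plausibly has the following shape (the exact keys and literals are an assumption):

    # Assumed shape of AZURE_COSMOS_CONTAINER_NAMES, inferred from the
    # docstring above; the authoritative definition is in alma.storage.constants.
    AZURE_COSMOS_CONTAINER_NAMES = {
        MemoryType.HEURISTICS: "alma_heuristics",
        MemoryType.OUTCOMES: "alma_outcomes",
        MemoryType.PREFERENCES: "alma_preferences",
        MemoryType.DOMAIN_KNOWLEDGE: "alma_domain_knowledge",
        MemoryType.ANTI_PATTERNS: "alma_anti_patterns",
    }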
    def __init__(
        self,
        endpoint: str,
        key: str,
        database_name: str = "alma-memory",
        embedding_dim: int = 384,
        create_if_not_exists: bool = True,
    ):
        """
        Initialize Azure Cosmos DB storage.

        Args:
            endpoint: Cosmos DB account endpoint
            key: Cosmos DB account key
            database_name: Name of the database
            embedding_dim: Dimension of embedding vectors
            create_if_not_exists: Create database/containers if missing
        """
        if not AZURE_COSMOS_AVAILABLE:
            raise ImportError(
                "azure-cosmos package required. Install with: pip install azure-cosmos"
            )

        self.endpoint = endpoint
        self.database_name = database_name
        self.embedding_dim = embedding_dim

        # Initialize client
        self.client = CosmosClient(endpoint, credential=key)

        # Get or create database
        if create_if_not_exists:
            self.database = self.client.create_database_if_not_exists(id=database_name)
            self._init_containers()
        else:
            self.database = self.client.get_database_client(database_name)

        # Cache container clients
        self._containers: Dict[str, ContainerProxy] = {}
        for key_name, container_name in self.CONTAINER_NAMES.items():
            self._containers[key_name] = self.database.get_container_client(
                container_name
            )

        # Cache for partition key mappings: {container_key: {doc_id: partition_key}}
        # This reduces RU consumption by avoiding cross-partition queries
        self._partition_key_cache: Dict[str, Dict[str, str]] = {
            mt: {} for mt in MemoryType.ALL
        }
        # Maximum cache size per container to prevent memory issues
        self._cache_max_size = 1000

        logger.info(f"Connected to Azure Cosmos DB: {database_name}")
    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "AzureCosmosStorage":
        """Create instance from configuration."""
        azure_config = config.get("azure", {})

        endpoint = azure_config.get("endpoint")
        key = azure_config.get("key")

        if not endpoint or not key:
            raise ValueError(
                "Azure Cosmos DB requires 'azure.endpoint' and 'azure.key' in config"
            )

        return cls(
            endpoint=endpoint,
            key=key,
            database_name=azure_config.get("database", "alma-memory"),
            embedding_dim=azure_config.get("embedding_dim", 384),
            create_if_not_exists=azure_config.get("create_if_not_exists", True),
        )
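A minimal construction sketch, assuming a config dict shaped like the azure: block in the module docstring; the endpoint and key values are placeholders that would normally come from environment variables or Key Vault:

    # Usage sketch; placeholder credentials, not a real account.
    config = {
        "azure": {
            "endpoint": "https://example.documents.azure.com:443/",
            "key": "<cosmos-account-key>",
            "database": "alma-memory",
            "embedding_dim": 384,
        }
    }
    storage = AzureCosmosStorage.from_config(config)

The later sketches below reuse this storage instance.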
    def _init_containers(self):
        """Initialize containers with vector search indexing."""
        # Container configs with indexing policies (using canonical memory types)
        container_configs = {
            MemoryType.HEURISTICS: {
                "partition_key": "/project_id",
                "vector_path": "/embedding",
                "vector_indexes": True,
            },
            MemoryType.OUTCOMES: {
                "partition_key": "/project_id",
                "vector_path": "/embedding",
                "vector_indexes": True,
            },
            MemoryType.PREFERENCES: {
                "partition_key": "/user_id",
                "vector_path": None,
                "vector_indexes": False,
            },
            MemoryType.DOMAIN_KNOWLEDGE: {
                "partition_key": "/project_id",
                "vector_path": "/embedding",
                "vector_indexes": True,
            },
            MemoryType.ANTI_PATTERNS: {
                "partition_key": "/project_id",
                "vector_path": "/embedding",
                "vector_indexes": True,
            },
        }

        for key_name, cfg in container_configs.items():
            container_name = self.CONTAINER_NAMES[key_name]

            # Build indexing policy
            indexing_policy = {
                "indexingMode": "consistent",
                "automatic": True,
                "includedPaths": [{"path": "/*"}],
                "excludedPaths": [{"path": '/"_etag"/?'}],
            }

            # Add vector embedding policy if needed
            vector_embedding_policy = None
            if cfg["vector_indexes"] and cfg["vector_path"]:
                # Exclude vector path from regular indexing
                indexing_policy["excludedPaths"].append(
                    {"path": f"{cfg['vector_path']}/*"}
                )

                # Vector embedding policy for DiskANN
                vector_embedding_policy = {
                    "vectorEmbeddings": [
                        {
                            "path": cfg["vector_path"],
                            "dataType": "float32",
                            "dimensions": self.embedding_dim,
                            "distanceFunction": "cosine",
                        }
                    ]
                }

            try:
                container_properties = {
                    "id": container_name,
                    "partition_key": PartitionKey(path=cfg["partition_key"]),
                    "indexing_policy": indexing_policy,
                }

                if vector_embedding_policy:
                    container_properties["vector_embedding_policy"] = (
                        vector_embedding_policy
                    )

                self.database.create_container_if_not_exists(**container_properties)
                logger.debug(f"Container ready: {container_name}")

            except exceptions.CosmosHttpResponseError as e:
                if e.status_code == 409:
                    logger.debug(f"Container already exists: {container_name}")
                else:
                    raise
    def _get_container(self, container_key: str) -> ContainerProxy:
        """Get container client by key."""
        return self._containers[container_key]

    def _cache_partition_key(
        self, container_key: str, doc_id: str, partition_key: str
    ) -> None:
        """
        Cache the partition key mapping for a document.

        This enables point reads for future operations, reducing RU consumption
        by avoiding expensive cross-partition queries.
        """
        cache = self._partition_key_cache[container_key]

        # Evict oldest entries if cache is full (simple FIFO eviction)
        if len(cache) >= self._cache_max_size:
            # Remove first 10% of entries
            keys_to_remove = list(cache.keys())[: self._cache_max_size // 10]
            for key in keys_to_remove:
                del cache[key]

        cache[doc_id] = partition_key

    def _get_cached_partition_key(
        self, container_key: str, doc_id: str
    ) -> Optional[str]:
        """
        Get cached partition key for a document if available.

        Returns None if the partition key is not cached.
        """
        return self._partition_key_cache.get(container_key, {}).get(doc_id)

    def _invalidate_partition_key_cache(self, container_key: str, doc_id: str) -> None:
        """Remove a document from the partition key cache."""
        cache = self._partition_key_cache.get(container_key, {})
        cache.pop(doc_id, None)
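The eviction above relies on Python dicts preserving insertion order, so slicing the first keys removes the oldest entries. A standalone sketch of the same FIFO policy, with the capacity shrunk to four entries for illustration (the class itself uses 1000 and drops 100 at a time):

    # FIFO eviction sketch at toy scale; max(1, ...) only guards the
    # shrunken capacity here, since 4 // 10 would otherwise evict nothing.
    cache = {}
    cache_max_size = 4
    for i in range(10):
        if len(cache) >= cache_max_size:
            for old_key in list(cache.keys())[: max(1, cache_max_size // 10)]:
                del cache[old_key]
        cache[f"doc-{i}"] = "proj-1"
    print(list(cache))  # ['doc-6', 'doc-7', 'doc-8', 'doc-9']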
    def _point_read_document(
        self,
        container_key: str,
        doc_id: str,
        partition_key: Optional[str] = None,
    ) -> Optional[Dict[str, Any]]:
        """
        Attempt to read a document using a point read (1 RU) instead of a query.

        If partition_key is provided, performs a direct point read.
        If partition_key is not provided but is cached, uses the cached value.
        If neither is available, falls back to a cross-partition query.

        Args:
            container_key: The container key (e.g., 'heuristics', 'knowledge')
            doc_id: The document ID
            partition_key: Optional partition key for direct point read

        Returns:
            The document if found, None otherwise
        """
        container = self._get_container(container_key)

        # Try to get partition key from cache if not provided
        if partition_key is None:
            partition_key = self._get_cached_partition_key(container_key, doc_id)

        # If we have a partition key, use point read (1 RU)
        if partition_key is not None:
            try:
                doc = container.read_item(item=doc_id, partition_key=partition_key)
                # Refresh cache on successful read
                self._cache_partition_key(container_key, doc_id, partition_key)
                return doc
            except exceptions.CosmosResourceNotFoundError:
                # Document not found or partition key was wrong
                self._invalidate_partition_key_cache(container_key, doc_id)
                # Fall through to cross-partition query
            except Exception as e:
                logger.warning(f"Point read failed for {doc_id}: {e}")
                # Fall through to cross-partition query

        # Fallback: Cross-partition query (expensive but necessary without partition key)
        logger.debug(
            f"Using cross-partition query for {doc_id} in {container_key} "
            "(consider providing project_id for better performance)"
        )
        query = "SELECT * FROM c WHERE c.id = @id"
        items = list(
            container.query_items(
                query=query,
                parameters=[{"name": "@id", "value": doc_id}],
                enable_cross_partition_query=True,
            )
        )

        if items:
            doc = items[0]
            # Cache the partition key for future operations
            pk_field = self._get_partition_key_field(container_key)
            if pk_field and pk_field in doc:
                self._cache_partition_key(container_key, doc_id, doc[pk_field])
            return doc

        return None

    def _get_partition_key_field(self, container_key: str) -> Optional[str]:
        """Get the partition key field name for a container."""
        partition_key_fields = {
            MemoryType.HEURISTICS: "project_id",
            MemoryType.OUTCOMES: "project_id",
            MemoryType.PREFERENCES: "user_id",
            MemoryType.DOMAIN_KNOWLEDGE: "project_id",
            MemoryType.ANTI_PATTERNS: "project_id",
        }
        return partition_key_fields.get(container_key)
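A sketch of the three lookup paths as a caller sees them, calling the private helper directly purely for illustration; the ids are placeholders, and it assumes the MemoryType constants are the plain strings the docstring hints at (e.g. "heuristics"):

    # 1. Partition key supplied: direct point read, roughly 1 RU.
    doc = storage._point_read_document("heuristics", "h-123", partition_key="proj-1")
    # 2. No key supplied, but a prior save/get cached it: still a point read.
    doc = storage._point_read_document("heuristics", "h-123")
    # 3. Nothing cached: cross-partition query, after which the partition
    #    key found on the document is cached for next time.
    doc = storage._point_read_document("heuristics", "h-unseen")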
    # ==================== WRITE OPERATIONS ====================

    def save_heuristic(self, heuristic: Heuristic) -> str:
        """Save a heuristic."""
        container = self._get_container(MemoryType.HEURISTICS)

        doc = {
            "id": heuristic.id,
            "agent": heuristic.agent,
            "project_id": heuristic.project_id,
            "condition": heuristic.condition,
            "strategy": heuristic.strategy,
            "confidence": heuristic.confidence,
            "occurrence_count": heuristic.occurrence_count,
            "success_count": heuristic.success_count,
            "last_validated": (
                heuristic.last_validated.isoformat()
                if heuristic.last_validated
                else None
            ),
            "created_at": (
                heuristic.created_at.isoformat() if heuristic.created_at else None
            ),
            "metadata": heuristic.metadata or {},
            "embedding": heuristic.embedding,
            "type": "heuristic",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(
            MemoryType.HEURISTICS, heuristic.id, heuristic.project_id
        )
        logger.debug(f"Saved heuristic: {heuristic.id}")
        return heuristic.id

    def save_outcome(self, outcome: Outcome) -> str:
        """Save an outcome."""
        container = self._get_container(MemoryType.OUTCOMES)

        doc = {
            "id": outcome.id,
            "agent": outcome.agent,
            "project_id": outcome.project_id,
            "task_type": outcome.task_type,
            "task_description": outcome.task_description,
            "success": outcome.success,
            "strategy_used": outcome.strategy_used,
            "duration_ms": outcome.duration_ms,
            "error_message": outcome.error_message,
            "user_feedback": outcome.user_feedback,
            "timestamp": outcome.timestamp.isoformat() if outcome.timestamp else None,
            "metadata": outcome.metadata or {},
            "embedding": outcome.embedding,
            "type": "outcome",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(MemoryType.OUTCOMES, outcome.id, outcome.project_id)
        logger.debug(f"Saved outcome: {outcome.id}")
        return outcome.id

    def save_user_preference(self, preference: UserPreference) -> str:
        """Save a user preference."""
        container = self._get_container(MemoryType.PREFERENCES)

        doc = {
            "id": preference.id,
            "user_id": preference.user_id,
            "category": preference.category,
            "preference": preference.preference,
            "source": preference.source,
            "confidence": preference.confidence,
            "timestamp": (
                preference.timestamp.isoformat() if preference.timestamp else None
            ),
            "metadata": preference.metadata or {},
            "type": "preference",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(
            MemoryType.PREFERENCES, preference.id, preference.user_id
        )
        logger.debug(f"Saved preference: {preference.id}")
        return preference.id

    def save_domain_knowledge(self, knowledge: DomainKnowledge) -> str:
        """Save domain knowledge."""
        container = self._get_container(MemoryType.DOMAIN_KNOWLEDGE)

        doc = {
            "id": knowledge.id,
            "agent": knowledge.agent,
            "project_id": knowledge.project_id,
            "domain": knowledge.domain,
            "fact": knowledge.fact,
            "source": knowledge.source,
            "confidence": knowledge.confidence,
            "last_verified": (
                knowledge.last_verified.isoformat() if knowledge.last_verified else None
            ),
            "metadata": knowledge.metadata or {},
            "embedding": knowledge.embedding,
            "type": "domain_knowledge",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(
            MemoryType.DOMAIN_KNOWLEDGE, knowledge.id, knowledge.project_id
        )
        logger.debug(f"Saved domain knowledge: {knowledge.id}")
        return knowledge.id

    def save_anti_pattern(self, anti_pattern: AntiPattern) -> str:
        """Save an anti-pattern."""
        container = self._get_container(MemoryType.ANTI_PATTERNS)

        doc = {
            "id": anti_pattern.id,
            "agent": anti_pattern.agent,
            "project_id": anti_pattern.project_id,
            "pattern": anti_pattern.pattern,
            "why_bad": anti_pattern.why_bad,
            "better_alternative": anti_pattern.better_alternative,
            "occurrence_count": anti_pattern.occurrence_count,
            "last_seen": (
                anti_pattern.last_seen.isoformat() if anti_pattern.last_seen else None
            ),
            "created_at": (
                anti_pattern.created_at.isoformat() if anti_pattern.created_at else None
            ),
            "metadata": anti_pattern.metadata or {},
            "embedding": anti_pattern.embedding,
            "type": "anti_pattern",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(
            MemoryType.ANTI_PATTERNS, anti_pattern.id, anti_pattern.project_id
        )
        logger.debug(f"Saved anti-pattern: {anti_pattern.id}")
        return anti_pattern.id
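A write-path sketch; the Heuristic constructor fields are assumed from the document built in save_heuristic above, and every value is a placeholder:

    # Write sketch (assumed Heuristic signature, placeholder values).
    now = datetime.now(timezone.utc)
    heuristic = Heuristic(
        id="h-123",
        agent="planner",
        project_id="proj-1",
        condition="task involves large CSV files",
        strategy="stream rows instead of loading the whole file",
        confidence=0.7,
        occurrence_count=1,
        success_count=1,
        last_validated=now,
        created_at=now,
        embedding=None,  # optional; needed only for vector retrieval
        metadata={},
    )
    storage.save_heuristic(heuristic)  # upserts, then caches id -> project_id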
    # ==================== READ OPERATIONS ====================

    def get_heuristics(
        self,
        project_id: str,
        agent: Optional[str] = None,
        embedding: Optional[List[float]] = None,
        top_k: int = 5,
        min_confidence: float = 0.0,
    ) -> List[Heuristic]:
        """Get heuristics with optional vector search."""
        container = self._get_container(MemoryType.HEURISTICS)

        if embedding:
            # Vector search query
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
                AND c.confidence >= @min_confidence
            """
            if agent:
                query += " AND c.agent = @agent"
            query += " ORDER BY VectorDistance(c.embedding, @embedding)"

            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@min_confidence", "value": min_confidence},
                {"name": "@embedding", "value": embedding},
            ]
            if agent:
                parameters.append({"name": "@agent", "value": agent})

        else:
            # Regular query
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
                AND c.confidence >= @min_confidence
            """
            if agent:
                query += " AND c.agent = @agent"
            query += " ORDER BY c.confidence DESC"

            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@min_confidence", "value": min_confidence},
            ]
            if agent:
                parameters.append({"name": "@agent", "value": agent})

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(
                MemoryType.HEURISTICS, doc["id"], doc["project_id"]
            )

        return [self._doc_to_heuristic(doc) for doc in items]

    def get_outcomes(
        self,
        project_id: str,
        agent: Optional[str] = None,
        task_type: Optional[str] = None,
        embedding: Optional[List[float]] = None,
        top_k: int = 5,
        success_only: bool = False,
    ) -> List[Outcome]:
        """Get outcomes with optional vector search."""
        container = self._get_container(MemoryType.OUTCOMES)

        if embedding:
            # Vector search query
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@embedding", "value": embedding},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})
            if task_type:
                query += " AND c.task_type = @task_type"
                parameters.append({"name": "@task_type", "value": task_type})
            if success_only:
                query += " AND c.success = true"

            query += " ORDER BY VectorDistance(c.embedding, @embedding)"

        else:
            # Regular query
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})
            if task_type:
                query += " AND c.task_type = @task_type"
                parameters.append({"name": "@task_type", "value": task_type})
            if success_only:
                query += " AND c.success = true"

            query += " ORDER BY c.timestamp DESC"

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(MemoryType.OUTCOMES, doc["id"], doc["project_id"])

        return [self._doc_to_outcome(doc) for doc in items]

    def get_user_preferences(
        self,
        user_id: str,
        category: Optional[str] = None,
    ) -> List[UserPreference]:
        """Get user preferences."""
        container = self._get_container(MemoryType.PREFERENCES)

        query = "SELECT * FROM c WHERE c.user_id = @user_id"
        parameters = [{"name": "@user_id", "value": user_id}]

        if category:
            query += " AND c.category = @category"
            parameters.append({"name": "@category", "value": category})

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=user_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(MemoryType.PREFERENCES, doc["id"], doc["user_id"])

        return [self._doc_to_preference(doc) for doc in items]

    def get_domain_knowledge(
        self,
        project_id: str,
        agent: Optional[str] = None,
        domain: Optional[str] = None,
        embedding: Optional[List[float]] = None,
        top_k: int = 5,
    ) -> List[DomainKnowledge]:
        """Get domain knowledge with optional vector search."""
        container = self._get_container(MemoryType.DOMAIN_KNOWLEDGE)

        if embedding:
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@embedding", "value": embedding},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})
            if domain:
                query += " AND c.domain = @domain"
                parameters.append({"name": "@domain", "value": domain})

            query += " ORDER BY VectorDistance(c.embedding, @embedding)"

        else:
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})
            if domain:
                query += " AND c.domain = @domain"
                parameters.append({"name": "@domain", "value": domain})

            query += " ORDER BY c.confidence DESC"

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(
                MemoryType.DOMAIN_KNOWLEDGE, doc["id"], doc["project_id"]
            )

        return [self._doc_to_domain_knowledge(doc) for doc in items]

    def get_anti_patterns(
        self,
        project_id: str,
        agent: Optional[str] = None,
        embedding: Optional[List[float]] = None,
        top_k: int = 5,
    ) -> List[AntiPattern]:
        """Get anti-patterns with optional vector search."""
        container = self._get_container(MemoryType.ANTI_PATTERNS)

        if embedding:
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@embedding", "value": embedding},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})

            query += " ORDER BY VectorDistance(c.embedding, @embedding)"

        else:
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})

            query += " ORDER BY c.occurrence_count DESC"

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(
                MemoryType.ANTI_PATTERNS, doc["id"], doc["project_id"]
            )

        return [self._doc_to_anti_pattern(doc) for doc in items]
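A retrieval sketch: passing an embedding switches the SQL to ORDER BY VectorDistance, while omitting it falls back to the scalar ordering. The query vector here is a placeholder of the configured dimension; a real one would come from the embedding model used at save time:

    query_vector = [0.0] * 384  # placeholder 384-dim embedding
    nearest = storage.get_heuristics(
        project_id="proj-1",
        embedding=query_vector,  # ORDER BY VectorDistance(c.embedding, @embedding)
        top_k=5,
        min_confidence=0.5,
    )
    recent = storage.get_outcomes(
        project_id="proj-1",
        task_type="deploy",  # no embedding: ORDER BY c.timestamp DESC
    )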
    # ==================== UPDATE OPERATIONS ====================

    def update_heuristic(
        self,
        heuristic_id: str,
        updates: Dict[str, Any],
        project_id: Optional[str] = None,
    ) -> bool:
        """
        Update a heuristic's fields.

        Args:
            heuristic_id: The heuristic document ID
            updates: Dictionary of fields to update
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if update succeeded, False if document not found
        """
        container = self._get_container(MemoryType.HEURISTICS)

        # Use optimized point read with cache fallback
        doc = self._point_read_document(MemoryType.HEURISTICS, heuristic_id, project_id)

        if not doc:
            return False

        # Apply updates
        for key, value in updates.items():
            if isinstance(value, datetime):
                doc[key] = value.isoformat()
            else:
                doc[key] = value

        container.replace_item(item=heuristic_id, body=doc)
        return True

    def increment_heuristic_occurrence(
        self,
        heuristic_id: str,
        success: bool,
        project_id: Optional[str] = None,
    ) -> bool:
        """
        Increment heuristic occurrence count.

        Args:
            heuristic_id: The heuristic document ID
            success: Whether this occurrence was successful
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if update succeeded, False if document not found
        """
        container = self._get_container(MemoryType.HEURISTICS)

        # Use optimized point read with cache fallback
        doc = self._point_read_document(MemoryType.HEURISTICS, heuristic_id, project_id)

        if not doc:
            return False

        doc["occurrence_count"] = doc.get("occurrence_count", 0) + 1
        if success:
            doc["success_count"] = doc.get("success_count", 0) + 1
            doc["last_validated"] = datetime.now(timezone.utc).isoformat()

        container.replace_item(item=heuristic_id, body=doc)
        return True

    def update_heuristic_confidence(
        self,
        heuristic_id: str,
        new_confidence: float,
        project_id: Optional[str] = None,
    ) -> bool:
        """
        Update confidence score for a heuristic.

        Args:
            heuristic_id: The heuristic document ID
            new_confidence: The new confidence value
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if update succeeded, False if document not found

        Performance Note:
            - With project_id: 1 RU for point read + write cost
            - With cached partition key: 1 RU for point read + write cost
            - Without either: Cross-partition query (variable, higher RUs)
        """
        container = self._get_container(MemoryType.HEURISTICS)

        # Use optimized point read with cache fallback
        doc = self._point_read_document(MemoryType.HEURISTICS, heuristic_id, project_id)

        if not doc:
            return False

        doc["confidence"] = new_confidence

        container.replace_item(item=heuristic_id, body=doc)
        logger.debug(
            f"Updated heuristic confidence: {heuristic_id} -> {new_confidence}"
        )
        return True

    def update_knowledge_confidence(
        self,
        knowledge_id: str,
        new_confidence: float,
        project_id: Optional[str] = None,
    ) -> bool:
        """
        Update confidence score for domain knowledge.

        Args:
            knowledge_id: The knowledge document ID
            new_confidence: The new confidence value
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if update succeeded, False if document not found

        Performance Note:
            - With project_id: 1 RU for point read + write cost
            - With cached partition key: 1 RU for point read + write cost
            - Without either: Cross-partition query (variable, higher RUs)
        """
        container = self._get_container(MemoryType.DOMAIN_KNOWLEDGE)

        # Use optimized point read with cache fallback
        doc = self._point_read_document(
            MemoryType.DOMAIN_KNOWLEDGE, knowledge_id, project_id
        )

        if not doc:
            return False

        doc["confidence"] = new_confidence

        container.replace_item(item=knowledge_id, body=doc)
        logger.debug(
            f"Updated knowledge confidence: {knowledge_id} -> {new_confidence}"
        )
        return True
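An update sketch: supplying project_id keeps each call on the 1 RU point-read path described in the performance notes; the ids continue the placeholders from the write sketch:

    storage.update_heuristic_confidence("h-123", 0.85, project_id="proj-1")
    storage.increment_heuristic_occurrence("h-123", success=True, project_id="proj-1")
    storage.update_heuristic(
        "h-123",
        {"strategy": "stream rows in 10k-row chunks"},  # datetime values get isoformat()-ed
        project_id="proj-1",
    )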
    # ==================== DELETE OPERATIONS ====================

    def delete_outcomes_older_than(
        self,
        project_id: str,
        older_than: datetime,
        agent: Optional[str] = None,
    ) -> int:
        """Delete old outcomes."""
        container = self._get_container(MemoryType.OUTCOMES)

        query = """
            SELECT c.id FROM c
            WHERE c.project_id = @project_id
            AND c.timestamp < @older_than
        """
        parameters = [
            {"name": "@project_id", "value": project_id},
            {"name": "@older_than", "value": older_than.isoformat()},
        ]

        if agent:
            query += " AND c.agent = @agent"
            parameters.append({"name": "@agent", "value": agent})

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        deleted = 0
        for item in items:
            try:
                container.delete_item(item=item["id"], partition_key=project_id)
                deleted += 1
            except exceptions.CosmosResourceNotFoundError:
                pass

        logger.info(f"Deleted {deleted} old outcomes")
        return deleted

    def delete_low_confidence_heuristics(
        self,
        project_id: str,
        below_confidence: float,
        agent: Optional[str] = None,
    ) -> int:
        """Delete low-confidence heuristics."""
        container = self._get_container(MemoryType.HEURISTICS)

        query = """
            SELECT c.id FROM c
            WHERE c.project_id = @project_id
            AND c.confidence < @below_confidence
        """
        parameters = [
            {"name": "@project_id", "value": project_id},
            {"name": "@below_confidence", "value": below_confidence},
        ]

        if agent:
            query += " AND c.agent = @agent"
            parameters.append({"name": "@agent", "value": agent})

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        deleted = 0
        for item in items:
            try:
                container.delete_item(item=item["id"], partition_key=project_id)
                deleted += 1
            except exceptions.CosmosResourceNotFoundError:
                pass

        logger.info(f"Deleted {deleted} low-confidence heuristics")
        return deleted

    def delete_heuristic(
        self, heuristic_id: str, project_id: Optional[str] = None
    ) -> bool:
        """
        Delete a specific heuristic by ID.

        Args:
            heuristic_id: The heuristic document ID
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if deletion succeeded, False if document not found
        """
        container = self._get_container(MemoryType.HEURISTICS)

        # Try to get partition key from cache if not provided
        if project_id is None:
            project_id = self._get_cached_partition_key(
                MemoryType.HEURISTICS, heuristic_id
            )

        # If we have a partition key, try direct delete
        if project_id is not None:
            try:
                container.delete_item(item=heuristic_id, partition_key=project_id)
                self._invalidate_partition_key_cache(
                    MemoryType.HEURISTICS, heuristic_id
                )
                return True
            except exceptions.CosmosResourceNotFoundError:
                # Document not found or partition key was wrong
                self._invalidate_partition_key_cache(
                    MemoryType.HEURISTICS, heuristic_id
                )
                # Fall through to cross-partition lookup

        # Fallback: Cross-partition query to find the document
        logger.debug(
            f"Using cross-partition query for delete {heuristic_id} "
            "(consider providing project_id for better performance)"
        )
        doc = self._point_read_document(MemoryType.HEURISTICS, heuristic_id, None)

        if not doc:
            return False

        project_id = doc["project_id"]

        try:
            container.delete_item(item=heuristic_id, partition_key=project_id)
            self._invalidate_partition_key_cache(MemoryType.HEURISTICS, heuristic_id)
            return True
        except exceptions.CosmosResourceNotFoundError:
            return False
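A retention sketch; the timestamp comparison works because the stored values are isoformat() strings, which order lexicographically. The 90-day window and 0.2 threshold are illustrative only:

    from datetime import timedelta

    cutoff = datetime.now(timezone.utc) - timedelta(days=90)
    n_outcomes = storage.delete_outcomes_older_than("proj-1", older_than=cutoff)
    n_heuristics = storage.delete_low_confidence_heuristics(
        "proj-1", below_confidence=0.2
    )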
    # ==================== STATS ====================

    def get_stats(
        self,
        project_id: str,
        agent: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Get memory statistics."""
        stats = {
            "project_id": project_id,
            "agent": agent,
            "storage_type": "azure_cosmos",
            "database": self.database_name,
        }

        # Count items in each container using canonical memory types
        for memory_type in MemoryType.ALL:
            container = self._get_container(memory_type)

            if memory_type == MemoryType.PREFERENCES:
                # Preferences use user_id, not project_id
                result = list(
                    container.query_items(
                        query="SELECT VALUE COUNT(1) FROM c",
                        enable_cross_partition_query=True,
                    )
                )
            else:
                query = "SELECT VALUE COUNT(1) FROM c WHERE c.project_id = @project_id"
                parameters = [{"name": "@project_id", "value": project_id}]

                if agent:
                    query = """
                        SELECT VALUE COUNT(1) FROM c
                        WHERE c.project_id = @project_id AND c.agent = @agent
                    """
                    parameters.append({"name": "@agent", "value": agent})

                result = list(
                    container.query_items(
                        query=query,
                        parameters=parameters,
                        enable_cross_partition_query=False,
                        partition_key=project_id,
                    )
                )
            stats[f"{memory_type}_count"] = result[0] if result else 0

        stats["total_count"] = sum(
            stats.get(k, 0) for k in stats if k.endswith("_count")
        )

        return stats
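A stats sketch; the per-container keys follow the f"{memory_type}_count" pattern above, which assumes the MemoryType constants are plain strings such as "heuristics":

    stats = storage.get_stats("proj-1")
    print(stats["heuristics_count"], stats["total_count"])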
    # ==================== HELPERS ====================

    def _parse_datetime(self, value: Any) -> Optional[datetime]:
        """Parse datetime from string."""
        if value is None:
            return None
        if isinstance(value, datetime):
            return value
        try:
            return datetime.fromisoformat(value.replace("Z", "+00:00"))
        except (ValueError, AttributeError):
            return None

    def _doc_to_heuristic(self, doc: Dict[str, Any]) -> Heuristic:
        """Convert Cosmos DB document to Heuristic."""
        return Heuristic(
            id=doc["id"],
            agent=doc["agent"],
            project_id=doc["project_id"],
            condition=doc["condition"],
            strategy=doc["strategy"],
            confidence=doc.get("confidence", 0.0),
            occurrence_count=doc.get("occurrence_count", 0),
            success_count=doc.get("success_count", 0),
            last_validated=self._parse_datetime(doc.get("last_validated"))
            or datetime.now(timezone.utc),
            created_at=self._parse_datetime(doc.get("created_at"))
            or datetime.now(timezone.utc),
            embedding=doc.get("embedding"),
            metadata=doc.get("metadata", {}),
        )

    def _doc_to_outcome(self, doc: Dict[str, Any]) -> Outcome:
        """Convert Cosmos DB document to Outcome."""
        return Outcome(
            id=doc["id"],
            agent=doc["agent"],
            project_id=doc["project_id"],
            task_type=doc.get("task_type", "general"),
            task_description=doc["task_description"],
            success=doc.get("success", False),
            strategy_used=doc.get("strategy_used", ""),
            duration_ms=doc.get("duration_ms"),
            error_message=doc.get("error_message"),
            user_feedback=doc.get("user_feedback"),
            timestamp=self._parse_datetime(doc.get("timestamp"))
            or datetime.now(timezone.utc),
            embedding=doc.get("embedding"),
            metadata=doc.get("metadata", {}),
        )

    def _doc_to_preference(self, doc: Dict[str, Any]) -> UserPreference:
        """Convert Cosmos DB document to UserPreference."""
        return UserPreference(
            id=doc["id"],
            user_id=doc["user_id"],
            category=doc.get("category", "general"),
            preference=doc["preference"],
            source=doc.get("source", "unknown"),
            confidence=doc.get("confidence", 1.0),
            timestamp=self._parse_datetime(doc.get("timestamp"))
            or datetime.now(timezone.utc),
            metadata=doc.get("metadata", {}),
        )

    def _doc_to_domain_knowledge(self, doc: Dict[str, Any]) -> DomainKnowledge:
        """Convert Cosmos DB document to DomainKnowledge."""
        return DomainKnowledge(
            id=doc["id"],
            agent=doc["agent"],
            project_id=doc["project_id"],
            domain=doc.get("domain", "general"),
            fact=doc["fact"],
            source=doc.get("source", "unknown"),
            confidence=doc.get("confidence", 1.0),
            last_verified=self._parse_datetime(doc.get("last_verified"))
            or datetime.now(timezone.utc),
            embedding=doc.get("embedding"),
            metadata=doc.get("metadata", {}),
        )

    def _doc_to_anti_pattern(self, doc: Dict[str, Any]) -> AntiPattern:
        """Convert Cosmos DB document to AntiPattern."""
        return AntiPattern(
            id=doc["id"],
            agent=doc["agent"],
            project_id=doc["project_id"],
            pattern=doc["pattern"],
            why_bad=doc.get("why_bad", ""),
            better_alternative=doc.get("better_alternative", ""),
            occurrence_count=doc.get("occurrence_count", 1),
            last_seen=self._parse_datetime(doc.get("last_seen"))
            or datetime.now(timezone.utc),
            created_at=self._parse_datetime(doc.get("created_at"))
            or datetime.now(timezone.utc),
            embedding=doc.get("embedding"),
            metadata=doc.get("metadata", {}),
        )
1
|
+
"""
|
|
2
|
+
ALMA Azure Cosmos DB Storage Backend.
|
|
3
|
+
|
|
4
|
+
Production storage using Azure Cosmos DB with vector search capabilities.
|
|
5
|
+
Uses Azure Key Vault for secrets management.
|
|
6
|
+
|
|
7
|
+
Requirements:
|
|
8
|
+
pip install azure-cosmos azure-identity azure-keyvault-secrets
|
|
9
|
+
|
|
10
|
+
Configuration (config.yaml):
|
|
11
|
+
alma:
|
|
12
|
+
storage: azure
|
|
13
|
+
azure:
|
|
14
|
+
endpoint: ${AZURE_COSMOS_ENDPOINT}
|
|
15
|
+
key: ${KEYVAULT:cosmos-db-key}
|
|
16
|
+
database: alma-memory
|
|
17
|
+
embedding_dim: 384
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import logging
|
|
21
|
+
from datetime import datetime, timezone
|
|
22
|
+
from typing import Any, Dict, List, Optional
|
|
23
|
+
|
|
24
|
+
from alma.storage.base import StorageBackend
|
|
25
|
+
from alma.storage.constants import AZURE_COSMOS_CONTAINER_NAMES, MemoryType
|
|
26
|
+
from alma.types import (
|
|
27
|
+
AntiPattern,
|
|
28
|
+
DomainKnowledge,
|
|
29
|
+
Heuristic,
|
|
30
|
+
Outcome,
|
|
31
|
+
UserPreference,
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
logger = logging.getLogger(__name__)
|
|
35
|
+
|
|
36
|
+
# Try to import Azure SDK
|
|
37
|
+
try:
|
|
38
|
+
from azure.cosmos import CosmosClient, PartitionKey, exceptions
|
|
39
|
+
from azure.cosmos.container import ContainerProxy
|
|
40
|
+
from azure.cosmos.database import DatabaseProxy
|
|
41
|
+
|
|
42
|
+
AZURE_COSMOS_AVAILABLE = True
|
|
43
|
+
except ImportError:
|
|
44
|
+
AZURE_COSMOS_AVAILABLE = False
|
|
45
|
+
# Define placeholders for type hints when SDK not available
|
|
46
|
+
CosmosClient = None # type: ignore
|
|
47
|
+
PartitionKey = None # type: ignore
|
|
48
|
+
exceptions = None # type: ignore
|
|
49
|
+
ContainerProxy = Any # type: ignore
|
|
50
|
+
DatabaseProxy = Any # type: ignore
|
|
51
|
+
logger.warning(
|
|
52
|
+
"azure-cosmos package not installed. Install with: pip install azure-cosmos"
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class AzureCosmosStorage(StorageBackend):
|
|
57
|
+
"""
|
|
58
|
+
Azure Cosmos DB storage backend with vector search.
|
|
59
|
+
|
|
60
|
+
Uses:
|
|
61
|
+
- NoSQL API for document storage
|
|
62
|
+
- DiskANN vector indexing for similarity search
|
|
63
|
+
- Partition key: project_id for efficient queries
|
|
64
|
+
|
|
65
|
+
Container structure (uses canonical memory type names with alma_ prefix):
|
|
66
|
+
- alma_heuristics: Heuristics with vector embeddings
|
|
67
|
+
- alma_outcomes: Task outcomes with vector embeddings
|
|
68
|
+
- alma_preferences: User preferences (no vectors)
|
|
69
|
+
- alma_domain_knowledge: Domain knowledge with vector embeddings
|
|
70
|
+
- alma_anti_patterns: Anti-patterns with vector embeddings
|
|
71
|
+
|
|
72
|
+
Container names are derived from alma.storage.constants.AZURE_COSMOS_CONTAINER_NAMES
|
|
73
|
+
for consistency across all storage backends.
|
|
74
|
+
"""
|
|
75
|
+
|
|
76
|
+
# Use canonical container names from constants
|
|
77
|
+
CONTAINER_NAMES = AZURE_COSMOS_CONTAINER_NAMES

    def __init__(
        self,
        endpoint: str,
        key: str,
        database_name: str = "alma-memory",
        embedding_dim: int = 384,
        create_if_not_exists: bool = True,
    ):
        """
        Initialize Azure Cosmos DB storage.

        Args:
            endpoint: Cosmos DB account endpoint
            key: Cosmos DB account key
            database_name: Name of the database
            embedding_dim: Dimension of embedding vectors
            create_if_not_exists: Create database/containers if missing
        """
        if not AZURE_COSMOS_AVAILABLE:
            raise ImportError(
                "azure-cosmos package required. Install with: pip install azure-cosmos"
            )

        self.endpoint = endpoint
        self.database_name = database_name
        self.embedding_dim = embedding_dim

        # Initialize client
        self.client = CosmosClient(endpoint, credential=key)

        # Get or create database
        if create_if_not_exists:
            self.database = self.client.create_database_if_not_exists(id=database_name)
            self._init_containers()
        else:
            self.database = self.client.get_database_client(database_name)

        # Cache container clients
        self._containers: Dict[str, ContainerProxy] = {}
        for key_name, container_name in self.CONTAINER_NAMES.items():
            self._containers[key_name] = self.database.get_container_client(
                container_name
            )

        # Cache for partition key mappings: {container_key: {doc_id: partition_key}}
        # This reduces RU consumption by avoiding cross-partition queries
        self._partition_key_cache: Dict[str, Dict[str, str]] = {
            mt: {} for mt in MemoryType.ALL
        }
        # Maximum cache size per container to prevent memory issues
        self._cache_max_size = 1000

        logger.info(f"Connected to Azure Cosmos DB: {database_name}")

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "AzureCosmosStorage":
        """Create instance from configuration."""
        azure_config = config.get("azure", {})

        endpoint = azure_config.get("endpoint")
        key = azure_config.get("key")

        if not endpoint or not key:
            raise ValueError(
                "Azure Cosmos DB requires 'azure.endpoint' and 'azure.key' in config"
            )

        return cls(
            endpoint=endpoint,
            key=key,
            database_name=azure_config.get("database", "alma-memory"),
            embedding_dim=azure_config.get("embedding_dim", 384),
            create_if_not_exists=azure_config.get("create_if_not_exists", True),
        )
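
    # Illustrative usage sketch (editor's addition, not part of the original
    # module). The endpoint URL and key below are placeholders; in production
    # the key would come from Azure Key Vault via the ${KEYVAULT:...}
    # substitution described in the module docstring:
    #
    #     storage = AzureCosmosStorage.from_config({
    #         "azure": {
    #             "endpoint": "https://myaccount.documents.azure.com:443/",
    #             "key": "<cosmos-primary-key>",
    #             "database": "alma-memory",
    #             "embedding_dim": 384,
    #         }
    #     })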

    def _init_containers(self):
        """Initialize containers with vector search indexing."""
        # Container configs with indexing policies (using canonical memory types)
        container_configs = {
            MemoryType.HEURISTICS: {
                "partition_key": "/project_id",
                "vector_path": "/embedding",
                "vector_indexes": True,
            },
            MemoryType.OUTCOMES: {
                "partition_key": "/project_id",
                "vector_path": "/embedding",
                "vector_indexes": True,
            },
            MemoryType.PREFERENCES: {
                "partition_key": "/user_id",
                "vector_path": None,
                "vector_indexes": False,
            },
            MemoryType.DOMAIN_KNOWLEDGE: {
                "partition_key": "/project_id",
                "vector_path": "/embedding",
                "vector_indexes": True,
            },
            MemoryType.ANTI_PATTERNS: {
                "partition_key": "/project_id",
                "vector_path": "/embedding",
                "vector_indexes": True,
            },
        }

        for key_name, cfg in container_configs.items():
            container_name = self.CONTAINER_NAMES[key_name]

            # Build indexing policy
            indexing_policy = {
                "indexingMode": "consistent",
                "automatic": True,
                "includedPaths": [{"path": "/*"}],
                "excludedPaths": [{"path": '/"_etag"/?'}],
            }

            # Add vector embedding policy if needed
            vector_embedding_policy = None
            if cfg["vector_indexes"] and cfg["vector_path"]:
                # Exclude vector path from regular indexing
                indexing_policy["excludedPaths"].append(
                    {"path": f"{cfg['vector_path']}/*"}
                )

                # Vector embedding policy for DiskANN
                vector_embedding_policy = {
                    "vectorEmbeddings": [
                        {
                            "path": cfg["vector_path"],
                            "dataType": "float32",
                            "dimensions": self.embedding_dim,
                            "distanceFunction": "cosine",
                        }
                    ]
                }

            try:
                container_properties = {
                    "id": container_name,
                    "partition_key": PartitionKey(path=cfg["partition_key"]),
                    "indexing_policy": indexing_policy,
                }

                if vector_embedding_policy:
                    container_properties["vector_embedding_policy"] = (
                        vector_embedding_policy
                    )

                self.database.create_container_if_not_exists(**container_properties)
                logger.debug(f"Container ready: {container_name}")

            except exceptions.CosmosHttpResponseError as e:
                if e.status_code == 409:
                    logger.debug(f"Container already exists: {container_name}")
                else:
                    raise
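
    # Editor's note (sketch, not in the original code): the policy above
    # declares the embedding path but does not request a vector index, so
    # VectorDistance queries may fall back to scanning. On accounts with the
    # vector search capability enabled, a DiskANN index could be requested by
    # also adding, before create_container_if_not_exists:
    #
    #     indexing_policy["vectorIndexes"] = [
    #         {"path": cfg["vector_path"], "type": "diskANN"}
    #     ]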

    def _get_container(self, container_key: str) -> ContainerProxy:
        """Get container client by key."""
        return self._containers[container_key]

    def _cache_partition_key(
        self, container_key: str, doc_id: str, partition_key: str
    ) -> None:
        """
        Cache the partition key mapping for a document.

        This enables point reads for future operations, reducing RU consumption
        by avoiding expensive cross-partition queries.
        """
        cache = self._partition_key_cache[container_key]

        # Evict oldest entries if cache is full (simple FIFO eviction)
        if len(cache) >= self._cache_max_size:
            # Remove first 10% of entries
            keys_to_remove = list(cache.keys())[: self._cache_max_size // 10]
            for key in keys_to_remove:
                del cache[key]

        cache[doc_id] = partition_key

    def _get_cached_partition_key(
        self, container_key: str, doc_id: str
    ) -> Optional[str]:
        """
        Get cached partition key for a document if available.

        Returns None if the partition key is not cached.
        """
        return self._partition_key_cache.get(container_key, {}).get(doc_id)

    def _invalidate_partition_key_cache(self, container_key: str, doc_id: str) -> None:
        """Remove a document from the partition key cache."""
        cache = self._partition_key_cache.get(container_key, {})
        cache.pop(doc_id, None)

    def _point_read_document(
        self,
        container_key: str,
        doc_id: str,
        partition_key: Optional[str] = None,
    ) -> Optional[Dict[str, Any]]:
        """
        Attempt to read a document using a point read (1 RU) instead of a query.

        If partition_key is provided, performs a direct point read.
        If partition_key is not provided but is cached, uses the cached value.
        If neither is available, falls back to a cross-partition query.

        Args:
            container_key: The container key (e.g., 'heuristics', 'knowledge')
            doc_id: The document ID
            partition_key: Optional partition key for direct point read

        Returns:
            The document if found, None otherwise
        """
        container = self._get_container(container_key)

        # Try to get partition key from cache if not provided
        if partition_key is None:
            partition_key = self._get_cached_partition_key(container_key, doc_id)

        # If we have a partition key, use point read (1 RU)
        if partition_key is not None:
            try:
                doc = container.read_item(item=doc_id, partition_key=partition_key)
                # Refresh cache on successful read
                self._cache_partition_key(container_key, doc_id, partition_key)
                return doc
            except exceptions.CosmosResourceNotFoundError:
                # Document not found or partition key was wrong
                self._invalidate_partition_key_cache(container_key, doc_id)
                # Fall through to cross-partition query
            except Exception as e:
                logger.warning(f"Point read failed for {doc_id}: {e}")
                # Fall through to cross-partition query

        # Fallback: Cross-partition query (expensive but necessary without partition key)
        logger.debug(
            f"Using cross-partition query for {doc_id} in {container_key} "
            "(consider providing project_id for better performance)"
        )
        query = "SELECT * FROM c WHERE c.id = @id"
        items = list(
            container.query_items(
                query=query,
                parameters=[{"name": "@id", "value": doc_id}],
                enable_cross_partition_query=True,
            )
        )

        if items:
            doc = items[0]
            # Cache the partition key for future operations
            pk_field = self._get_partition_key_field(container_key)
            if pk_field and pk_field in doc:
                self._cache_partition_key(container_key, doc_id, doc[pk_field])
            return doc

        return None

    def _get_partition_key_field(self, container_key: str) -> Optional[str]:
        """Get the partition key field name for a container."""
        partition_key_fields = {
            MemoryType.HEURISTICS: "project_id",
            MemoryType.OUTCOMES: "project_id",
            MemoryType.PREFERENCES: "user_id",
            MemoryType.DOMAIN_KNOWLEDGE: "project_id",
            MemoryType.ANTI_PATTERNS: "project_id",
        }
        return partition_key_fields.get(container_key)

    # ==================== WRITE OPERATIONS ====================

    def save_heuristic(self, heuristic: Heuristic) -> str:
        """Save a heuristic."""
        container = self._get_container(MemoryType.HEURISTICS)

        doc = {
            "id": heuristic.id,
            "agent": heuristic.agent,
            "project_id": heuristic.project_id,
            "condition": heuristic.condition,
            "strategy": heuristic.strategy,
            "confidence": heuristic.confidence,
            "occurrence_count": heuristic.occurrence_count,
            "success_count": heuristic.success_count,
            "last_validated": (
                heuristic.last_validated.isoformat()
                if heuristic.last_validated
                else None
            ),
            "created_at": (
                heuristic.created_at.isoformat() if heuristic.created_at else None
            ),
            "metadata": heuristic.metadata or {},
            "embedding": heuristic.embedding,
            "type": "heuristic",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(
            MemoryType.HEURISTICS, heuristic.id, heuristic.project_id
        )
        logger.debug(f"Saved heuristic: {heuristic.id}")
        return heuristic.id
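
    # upsert_item creates the document or replaces it wholesale when the same
    # id already exists in the partition, which is why each save_* method can
    # also serve as a full-document update path. Hypothetical example (field
    # names follow the doc mapping above; exact constructor defaults may
    # differ):
    #
    #     hid = storage.save_heuristic(heuristic)  # returns heuristic.id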

    def save_outcome(self, outcome: Outcome) -> str:
        """Save an outcome."""
        container = self._get_container(MemoryType.OUTCOMES)

        doc = {
            "id": outcome.id,
            "agent": outcome.agent,
            "project_id": outcome.project_id,
            "task_type": outcome.task_type,
            "task_description": outcome.task_description,
            "success": outcome.success,
            "strategy_used": outcome.strategy_used,
            "duration_ms": outcome.duration_ms,
            "error_message": outcome.error_message,
            "user_feedback": outcome.user_feedback,
            "timestamp": outcome.timestamp.isoformat() if outcome.timestamp else None,
            "metadata": outcome.metadata or {},
            "embedding": outcome.embedding,
            "type": "outcome",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(MemoryType.OUTCOMES, outcome.id, outcome.project_id)
        logger.debug(f"Saved outcome: {outcome.id}")
        return outcome.id

    def save_user_preference(self, preference: UserPreference) -> str:
        """Save a user preference."""
        container = self._get_container(MemoryType.PREFERENCES)

        doc = {
            "id": preference.id,
            "user_id": preference.user_id,
            "category": preference.category,
            "preference": preference.preference,
            "source": preference.source,
            "confidence": preference.confidence,
            "timestamp": (
                preference.timestamp.isoformat() if preference.timestamp else None
            ),
            "metadata": preference.metadata or {},
            "type": "preference",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(
            MemoryType.PREFERENCES, preference.id, preference.user_id
        )
        logger.debug(f"Saved preference: {preference.id}")
        return preference.id

    def save_domain_knowledge(self, knowledge: DomainKnowledge) -> str:
        """Save domain knowledge."""
        container = self._get_container(MemoryType.DOMAIN_KNOWLEDGE)

        doc = {
            "id": knowledge.id,
            "agent": knowledge.agent,
            "project_id": knowledge.project_id,
            "domain": knowledge.domain,
            "fact": knowledge.fact,
            "source": knowledge.source,
            "confidence": knowledge.confidence,
            "last_verified": (
                knowledge.last_verified.isoformat() if knowledge.last_verified else None
            ),
            "metadata": knowledge.metadata or {},
            "embedding": knowledge.embedding,
            "type": "domain_knowledge",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(
            MemoryType.DOMAIN_KNOWLEDGE, knowledge.id, knowledge.project_id
        )
        logger.debug(f"Saved domain knowledge: {knowledge.id}")
        return knowledge.id

    def save_anti_pattern(self, anti_pattern: AntiPattern) -> str:
        """Save an anti-pattern."""
        container = self._get_container(MemoryType.ANTI_PATTERNS)

        doc = {
            "id": anti_pattern.id,
            "agent": anti_pattern.agent,
            "project_id": anti_pattern.project_id,
            "pattern": anti_pattern.pattern,
            "why_bad": anti_pattern.why_bad,
            "better_alternative": anti_pattern.better_alternative,
            "occurrence_count": anti_pattern.occurrence_count,
            "last_seen": (
                anti_pattern.last_seen.isoformat() if anti_pattern.last_seen else None
            ),
            "created_at": (
                anti_pattern.created_at.isoformat() if anti_pattern.created_at else None
            ),
            "metadata": anti_pattern.metadata or {},
            "embedding": anti_pattern.embedding,
            "type": "anti_pattern",
        }

        container.upsert_item(doc)
        # Cache partition key for efficient future updates
        self._cache_partition_key(
            MemoryType.ANTI_PATTERNS, anti_pattern.id, anti_pattern.project_id
        )
        logger.debug(f"Saved anti-pattern: {anti_pattern.id}")
        return anti_pattern.id

    # ==================== READ OPERATIONS ====================

    def get_heuristics(
        self,
        project_id: str,
        agent: Optional[str] = None,
        embedding: Optional[List[float]] = None,
        top_k: int = 5,
        min_confidence: float = 0.0,
    ) -> List[Heuristic]:
        """Get heuristics with optional vector search."""
        container = self._get_container(MemoryType.HEURISTICS)

        if embedding:
            # Vector search query
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
                AND c.confidence >= @min_confidence
            """
            if agent:
                query += " AND c.agent = @agent"
            query += " ORDER BY VectorDistance(c.embedding, @embedding)"

            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@min_confidence", "value": min_confidence},
                {"name": "@embedding", "value": embedding},
            ]
            if agent:
                parameters.append({"name": "@agent", "value": agent})

        else:
            # Regular query
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
                AND c.confidence >= @min_confidence
            """
            if agent:
                query += " AND c.agent = @agent"
            query += " ORDER BY c.confidence DESC"

            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@min_confidence", "value": min_confidence},
            ]
            if agent:
                parameters.append({"name": "@agent", "value": agent})

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(
                MemoryType.HEURISTICS, doc["id"], doc["project_id"]
            )

        return [self._doc_to_heuristic(doc) for doc in items]
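
    # For reference, with an embedding and an agent filter the assembled query
    # reads (whitespace normalized):
    #
    #     SELECT TOP @top_k * FROM c
    #     WHERE c.project_id = @project_id
    #       AND c.confidence >= @min_confidence
    #       AND c.agent = @agent
    #     ORDER BY VectorDistance(c.embedding, @embedding)
    #
    # VectorDistance sorts ascending by cosine distance, so the closest
    # embeddings come back first.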

    def get_outcomes(
        self,
        project_id: str,
        agent: Optional[str] = None,
        task_type: Optional[str] = None,
        embedding: Optional[List[float]] = None,
        top_k: int = 5,
        success_only: bool = False,
    ) -> List[Outcome]:
        """Get outcomes with optional vector search."""
        container = self._get_container(MemoryType.OUTCOMES)

        if embedding:
            # Vector search query
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@embedding", "value": embedding},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})
            if task_type:
                query += " AND c.task_type = @task_type"
                parameters.append({"name": "@task_type", "value": task_type})
            if success_only:
                query += " AND c.success = true"

            query += " ORDER BY VectorDistance(c.embedding, @embedding)"

        else:
            # Regular query
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})
            if task_type:
                query += " AND c.task_type = @task_type"
                parameters.append({"name": "@task_type", "value": task_type})
            if success_only:
                query += " AND c.success = true"

            query += " ORDER BY c.timestamp DESC"

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(MemoryType.OUTCOMES, doc["id"], doc["project_id"])

        return [self._doc_to_outcome(doc) for doc in items]

    def get_user_preferences(
        self,
        user_id: str,
        category: Optional[str] = None,
    ) -> List[UserPreference]:
        """Get user preferences."""
        container = self._get_container(MemoryType.PREFERENCES)

        query = "SELECT * FROM c WHERE c.user_id = @user_id"
        parameters = [{"name": "@user_id", "value": user_id}]

        if category:
            query += " AND c.category = @category"
            parameters.append({"name": "@category", "value": category})

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=user_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(MemoryType.PREFERENCES, doc["id"], doc["user_id"])

        return [self._doc_to_preference(doc) for doc in items]

    def get_domain_knowledge(
        self,
        project_id: str,
        agent: Optional[str] = None,
        domain: Optional[str] = None,
        embedding: Optional[List[float]] = None,
        top_k: int = 5,
    ) -> List[DomainKnowledge]:
        """Get domain knowledge with optional vector search."""
        container = self._get_container(MemoryType.DOMAIN_KNOWLEDGE)

        if embedding:
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@embedding", "value": embedding},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})
            if domain:
                query += " AND c.domain = @domain"
                parameters.append({"name": "@domain", "value": domain})

            query += " ORDER BY VectorDistance(c.embedding, @embedding)"

        else:
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})
            if domain:
                query += " AND c.domain = @domain"
                parameters.append({"name": "@domain", "value": domain})

            query += " ORDER BY c.confidence DESC"

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(
                MemoryType.DOMAIN_KNOWLEDGE, doc["id"], doc["project_id"]
            )

        return [self._doc_to_domain_knowledge(doc) for doc in items]

    def get_anti_patterns(
        self,
        project_id: str,
        agent: Optional[str] = None,
        embedding: Optional[List[float]] = None,
        top_k: int = 5,
    ) -> List[AntiPattern]:
        """Get anti-patterns with optional vector search."""
        container = self._get_container(MemoryType.ANTI_PATTERNS)

        if embedding:
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
                {"name": "@embedding", "value": embedding},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})

            query += " ORDER BY VectorDistance(c.embedding, @embedding)"

        else:
            query = """
                SELECT TOP @top_k *
                FROM c
                WHERE c.project_id = @project_id
            """
            parameters = [
                {"name": "@top_k", "value": top_k},
                {"name": "@project_id", "value": project_id},
            ]

            if agent:
                query += " AND c.agent = @agent"
                parameters.append({"name": "@agent", "value": agent})

            query += " ORDER BY c.occurrence_count DESC"

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        # Cache partition keys for efficient future updates
        for doc in items:
            self._cache_partition_key(
                MemoryType.ANTI_PATTERNS, doc["id"], doc["project_id"]
            )

        return [self._doc_to_anti_pattern(doc) for doc in items]

    # ==================== UPDATE OPERATIONS ====================

    def update_heuristic(
        self,
        heuristic_id: str,
        updates: Dict[str, Any],
        project_id: Optional[str] = None,
    ) -> bool:
        """
        Update a heuristic's fields.

        Args:
            heuristic_id: The heuristic document ID
            updates: Dictionary of fields to update
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if update succeeded, False if document not found
        """
        container = self._get_container(MemoryType.HEURISTICS)

        # Use optimized point read with cache fallback
        doc = self._point_read_document(MemoryType.HEURISTICS, heuristic_id, project_id)

        if not doc:
            return False

        # Apply updates
        for key, value in updates.items():
            if isinstance(value, datetime):
                doc[key] = value.isoformat()
            else:
                doc[key] = value

        container.replace_item(item=heuristic_id, body=doc)
        return True
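
    # Editor's note: this is a read-modify-write without an etag match, so
    # concurrent updaters can overwrite each other's changes; passing the
    # partition key keeps the read at ~1 RU. Illustrative call (values are
    # hypothetical):
    #
    #     storage.update_heuristic(
    #         "heur-123", {"strategy": "retry with backoff"}, project_id="proj-1"
    #     )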

    def increment_heuristic_occurrence(
        self,
        heuristic_id: str,
        success: bool,
        project_id: Optional[str] = None,
    ) -> bool:
        """
        Increment heuristic occurrence count.

        Args:
            heuristic_id: The heuristic document ID
            success: Whether this occurrence was successful
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if update succeeded, False if document not found
        """
        container = self._get_container(MemoryType.HEURISTICS)

        # Use optimized point read with cache fallback
        doc = self._point_read_document(MemoryType.HEURISTICS, heuristic_id, project_id)

        if not doc:
            return False

        doc["occurrence_count"] = doc.get("occurrence_count", 0) + 1
        if success:
            doc["success_count"] = doc.get("success_count", 0) + 1
            doc["last_validated"] = datetime.now(timezone.utc).isoformat()

        container.replace_item(item=heuristic_id, body=doc)
        return True

    def update_heuristic_confidence(
        self,
        heuristic_id: str,
        new_confidence: float,
        project_id: Optional[str] = None,
    ) -> bool:
        """
        Update confidence score for a heuristic.

        Args:
            heuristic_id: The heuristic document ID
            new_confidence: The new confidence value
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if update succeeded, False if document not found

        Performance Note:
            - With project_id: 1 RU for point read + write cost
            - With cached partition key: 1 RU for point read + write cost
            - Without either: Cross-partition query (variable, higher RUs)
        """
        container = self._get_container(MemoryType.HEURISTICS)

        # Use optimized point read with cache fallback
        doc = self._point_read_document(MemoryType.HEURISTICS, heuristic_id, project_id)

        if not doc:
            return False

        doc["confidence"] = new_confidence

        container.replace_item(item=heuristic_id, body=doc)
        logger.debug(
            f"Updated heuristic confidence: {heuristic_id} -> {new_confidence}"
        )
        return True

    def update_knowledge_confidence(
        self,
        knowledge_id: str,
        new_confidence: float,
        project_id: Optional[str] = None,
    ) -> bool:
        """
        Update confidence score for domain knowledge.

        Args:
            knowledge_id: The knowledge document ID
            new_confidence: The new confidence value
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if update succeeded, False if document not found

        Performance Note:
            - With project_id: 1 RU for point read + write cost
            - With cached partition key: 1 RU for point read + write cost
            - Without either: Cross-partition query (variable, higher RUs)
        """
        container = self._get_container(MemoryType.DOMAIN_KNOWLEDGE)

        # Use optimized point read with cache fallback
        doc = self._point_read_document(
            MemoryType.DOMAIN_KNOWLEDGE, knowledge_id, project_id
        )

        if not doc:
            return False

        doc["confidence"] = new_confidence

        container.replace_item(item=knowledge_id, body=doc)
        logger.debug(
            f"Updated knowledge confidence: {knowledge_id} -> {new_confidence}"
        )
        return True

    # ==================== DELETE OPERATIONS ====================

    def delete_outcomes_older_than(
        self,
        project_id: str,
        older_than: datetime,
        agent: Optional[str] = None,
    ) -> int:
        """Delete old outcomes."""
        container = self._get_container(MemoryType.OUTCOMES)

        query = """
            SELECT c.id FROM c
            WHERE c.project_id = @project_id
            AND c.timestamp < @older_than
        """
        parameters = [
            {"name": "@project_id", "value": project_id},
            {"name": "@older_than", "value": older_than.isoformat()},
        ]

        if agent:
            query += " AND c.agent = @agent"
            parameters.append({"name": "@agent", "value": agent})

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        deleted = 0
        for item in items:
            try:
                container.delete_item(item=item["id"], partition_key=project_id)
                deleted += 1
            except exceptions.CosmosResourceNotFoundError:
                pass

        logger.info(f"Deleted {deleted} old outcomes")
        return deleted
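
    # Retention sketch (illustrative; the import is assumed at the call site):
    #
    #     from datetime import datetime, timedelta, timezone
    #     cutoff = datetime.now(timezone.utc) - timedelta(days=90)
    #     removed = storage.delete_outcomes_older_than("proj-1", cutoff)
    #
    # The string comparison on c.timestamp works because ISO-8601 timestamps
    # sort lexicographically when they share a timezone representation.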

    def delete_low_confidence_heuristics(
        self,
        project_id: str,
        below_confidence: float,
        agent: Optional[str] = None,
    ) -> int:
        """Delete low-confidence heuristics."""
        container = self._get_container(MemoryType.HEURISTICS)

        query = """
            SELECT c.id FROM c
            WHERE c.project_id = @project_id
            AND c.confidence < @below_confidence
        """
        parameters = [
            {"name": "@project_id", "value": project_id},
            {"name": "@below_confidence", "value": below_confidence},
        ]

        if agent:
            query += " AND c.agent = @agent"
            parameters.append({"name": "@agent", "value": agent})

        items = list(
            container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=False,
                partition_key=project_id,
            )
        )

        deleted = 0
        for item in items:
            try:
                container.delete_item(item=item["id"], partition_key=project_id)
                deleted += 1
            except exceptions.CosmosResourceNotFoundError:
                pass

        logger.info(f"Deleted {deleted} low-confidence heuristics")
        return deleted

    def delete_heuristic(
        self, heuristic_id: str, project_id: Optional[str] = None
    ) -> bool:
        """
        Delete a specific heuristic by ID.

        Args:
            heuristic_id: The heuristic document ID
            project_id: Optional partition key for efficient point read (1 RU).
                If not provided, will attempt cache lookup, then
                fall back to cross-partition query (more expensive).

        Returns:
            True if deletion succeeded, False if document not found
        """
        container = self._get_container(MemoryType.HEURISTICS)

        # Try to get partition key from cache if not provided
        if project_id is None:
            project_id = self._get_cached_partition_key(
                MemoryType.HEURISTICS, heuristic_id
            )

        # If we have a partition key, try direct delete
        if project_id is not None:
            try:
                container.delete_item(item=heuristic_id, partition_key=project_id)
                self._invalidate_partition_key_cache(
                    MemoryType.HEURISTICS, heuristic_id
                )
                return True
            except exceptions.CosmosResourceNotFoundError:
                # Document not found or partition key was wrong
                self._invalidate_partition_key_cache(
                    MemoryType.HEURISTICS, heuristic_id
                )
                # Fall through to cross-partition lookup

        # Fallback: Cross-partition query to find the document
        logger.debug(
            f"Using cross-partition query for delete {heuristic_id} "
            "(consider providing project_id for better performance)"
        )
        doc = self._point_read_document(MemoryType.HEURISTICS, heuristic_id, None)

        if not doc:
            return False

        project_id = doc["project_id"]

        try:
            container.delete_item(item=heuristic_id, partition_key=project_id)
            self._invalidate_partition_key_cache(MemoryType.HEURISTICS, heuristic_id)
            return True
        except exceptions.CosmosResourceNotFoundError:
            return False

    # ==================== STATS ====================

    def get_stats(
        self,
        project_id: str,
        agent: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Get memory statistics."""
        stats = {
            "project_id": project_id,
            "agent": agent,
            "storage_type": "azure_cosmos",
            "database": self.database_name,
        }

        # Count items in each container using canonical memory types
        for memory_type in MemoryType.ALL:
            container = self._get_container(memory_type)

            if memory_type == MemoryType.PREFERENCES:
                # Preferences use user_id, not project_id
                result = list(
                    container.query_items(
                        query="SELECT VALUE COUNT(1) FROM c",
                        enable_cross_partition_query=True,
                    )
                )
            else:
                query = "SELECT VALUE COUNT(1) FROM c WHERE c.project_id = @project_id"
                parameters = [{"name": "@project_id", "value": project_id}]

                if agent:
                    query = """
                        SELECT VALUE COUNT(1) FROM c
                        WHERE c.project_id = @project_id AND c.agent = @agent
                    """
                    parameters.append({"name": "@agent", "value": agent})

                result = list(
                    container.query_items(
                        query=query,
                        parameters=parameters,
                        enable_cross_partition_query=False,
                        partition_key=project_id,
                    )
                )
            stats[f"{memory_type}_count"] = result[0] if result else 0

        stats["total_count"] = sum(
            stats.get(k, 0) for k in stats if k.endswith("_count")
        )

        return stats
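
    # Example of the returned shape (counts are hypothetical; the *_count key
    # names follow the MemoryType string values):
    #
    #     {
    #         "project_id": "proj-1",
    #         "agent": None,
    #         "storage_type": "azure_cosmos",
    #         "database": "alma-memory",
    #         "heuristics_count": 12,
    #         ...
    #         "total_count": 57,
    #     }
    #
    # Note the preferences count is global rather than per-project, since that
    # container is partitioned by user_id and the query above has no filter.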

    # ==================== HELPERS ====================

    def _parse_datetime(self, value: Any) -> Optional[datetime]:
        """Parse datetime from string."""
        if value is None:
            return None
        if isinstance(value, datetime):
            return value
        try:
            return datetime.fromisoformat(value.replace("Z", "+00:00"))
        except (ValueError, AttributeError):
            return None
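
    # The "Z" -> "+00:00" replacement exists because datetime.fromisoformat
    # only accepts the trailing "Z" from Python 3.11 onward; earlier versions
    # raise ValueError. For example:
    #
    #     storage._parse_datetime("2024-01-01T00:00:00Z")
    #     # -> datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)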

    def _doc_to_heuristic(self, doc: Dict[str, Any]) -> Heuristic:
        """Convert Cosmos DB document to Heuristic."""
        return Heuristic(
            id=doc["id"],
            agent=doc["agent"],
            project_id=doc["project_id"],
            condition=doc["condition"],
            strategy=doc["strategy"],
            confidence=doc.get("confidence", 0.0),
            occurrence_count=doc.get("occurrence_count", 0),
            success_count=doc.get("success_count", 0),
            last_validated=self._parse_datetime(doc.get("last_validated"))
            or datetime.now(timezone.utc),
            created_at=self._parse_datetime(doc.get("created_at"))
            or datetime.now(timezone.utc),
            embedding=doc.get("embedding"),
            metadata=doc.get("metadata", {}),
        )

    def _doc_to_outcome(self, doc: Dict[str, Any]) -> Outcome:
        """Convert Cosmos DB document to Outcome."""
        return Outcome(
            id=doc["id"],
            agent=doc["agent"],
            project_id=doc["project_id"],
            task_type=doc.get("task_type", "general"),
            task_description=doc["task_description"],
            success=doc.get("success", False),
            strategy_used=doc.get("strategy_used", ""),
            duration_ms=doc.get("duration_ms"),
            error_message=doc.get("error_message"),
            user_feedback=doc.get("user_feedback"),
            timestamp=self._parse_datetime(doc.get("timestamp"))
            or datetime.now(timezone.utc),
            embedding=doc.get("embedding"),
            metadata=doc.get("metadata", {}),
        )

    def _doc_to_preference(self, doc: Dict[str, Any]) -> UserPreference:
        """Convert Cosmos DB document to UserPreference."""
        return UserPreference(
            id=doc["id"],
            user_id=doc["user_id"],
            category=doc.get("category", "general"),
            preference=doc["preference"],
            source=doc.get("source", "unknown"),
            confidence=doc.get("confidence", 1.0),
            timestamp=self._parse_datetime(doc.get("timestamp"))
            or datetime.now(timezone.utc),
            metadata=doc.get("metadata", {}),
        )

    def _doc_to_domain_knowledge(self, doc: Dict[str, Any]) -> DomainKnowledge:
        """Convert Cosmos DB document to DomainKnowledge."""
        return DomainKnowledge(
            id=doc["id"],
            agent=doc["agent"],
            project_id=doc["project_id"],
            domain=doc.get("domain", "general"),
            fact=doc["fact"],
            source=doc.get("source", "unknown"),
            confidence=doc.get("confidence", 1.0),
            last_verified=self._parse_datetime(doc.get("last_verified"))
            or datetime.now(timezone.utc),
            embedding=doc.get("embedding"),
            metadata=doc.get("metadata", {}),
        )

    def _doc_to_anti_pattern(self, doc: Dict[str, Any]) -> AntiPattern:
        """Convert Cosmos DB document to AntiPattern."""
        return AntiPattern(
            id=doc["id"],
            agent=doc["agent"],
            project_id=doc["project_id"],
            pattern=doc["pattern"],
            why_bad=doc.get("why_bad", ""),
            better_alternative=doc.get("better_alternative", ""),
            occurrence_count=doc.get("occurrence_count", 1),
            last_seen=self._parse_datetime(doc.get("last_seen"))
            or datetime.now(timezone.utc),
            created_at=self._parse_datetime(doc.get("created_at"))
            or datetime.now(timezone.utc),
            embedding=doc.get("embedding"),
            metadata=doc.get("metadata", {}),
        )