alma-memory 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alma/__init__.py +121 -45
- alma/confidence/__init__.py +1 -1
- alma/confidence/engine.py +92 -58
- alma/confidence/types.py +34 -14
- alma/config/loader.py +3 -2
- alma/consolidation/__init__.py +23 -0
- alma/consolidation/engine.py +678 -0
- alma/consolidation/prompts.py +84 -0
- alma/core.py +136 -28
- alma/domains/__init__.py +6 -6
- alma/domains/factory.py +12 -9
- alma/domains/schemas.py +17 -3
- alma/domains/types.py +8 -4
- alma/events/__init__.py +75 -0
- alma/events/emitter.py +284 -0
- alma/events/storage_mixin.py +246 -0
- alma/events/types.py +126 -0
- alma/events/webhook.py +425 -0
- alma/exceptions.py +49 -0
- alma/extraction/__init__.py +31 -0
- alma/extraction/auto_learner.py +265 -0
- alma/extraction/extractor.py +420 -0
- alma/graph/__init__.py +106 -0
- alma/graph/backends/__init__.py +32 -0
- alma/graph/backends/kuzu.py +624 -0
- alma/graph/backends/memgraph.py +432 -0
- alma/graph/backends/memory.py +236 -0
- alma/graph/backends/neo4j.py +417 -0
- alma/graph/base.py +159 -0
- alma/graph/extraction.py +198 -0
- alma/graph/store.py +860 -0
- alma/harness/__init__.py +4 -4
- alma/harness/base.py +18 -9
- alma/harness/domains.py +27 -11
- alma/initializer/__init__.py +1 -1
- alma/initializer/initializer.py +51 -43
- alma/initializer/types.py +25 -17
- alma/integration/__init__.py +9 -9
- alma/integration/claude_agents.py +32 -20
- alma/integration/helena.py +32 -22
- alma/integration/victor.py +57 -33
- alma/learning/__init__.py +27 -27
- alma/learning/forgetting.py +198 -148
- alma/learning/heuristic_extractor.py +40 -24
- alma/learning/protocols.py +65 -17
- alma/learning/validation.py +7 -2
- alma/mcp/__init__.py +4 -4
- alma/mcp/__main__.py +2 -1
- alma/mcp/resources.py +17 -16
- alma/mcp/server.py +102 -44
- alma/mcp/tools.py +180 -45
- alma/observability/__init__.py +84 -0
- alma/observability/config.py +302 -0
- alma/observability/logging.py +424 -0
- alma/observability/metrics.py +583 -0
- alma/observability/tracing.py +440 -0
- alma/progress/__init__.py +3 -3
- alma/progress/tracker.py +26 -20
- alma/progress/types.py +8 -12
- alma/py.typed +0 -0
- alma/retrieval/__init__.py +11 -11
- alma/retrieval/cache.py +20 -21
- alma/retrieval/embeddings.py +4 -4
- alma/retrieval/engine.py +179 -39
- alma/retrieval/scoring.py +73 -63
- alma/session/__init__.py +2 -2
- alma/session/manager.py +5 -5
- alma/session/types.py +5 -4
- alma/storage/__init__.py +70 -0
- alma/storage/azure_cosmos.py +414 -133
- alma/storage/base.py +215 -4
- alma/storage/chroma.py +1443 -0
- alma/storage/constants.py +103 -0
- alma/storage/file_based.py +59 -28
- alma/storage/migrations/__init__.py +21 -0
- alma/storage/migrations/base.py +321 -0
- alma/storage/migrations/runner.py +323 -0
- alma/storage/migrations/version_stores.py +337 -0
- alma/storage/migrations/versions/__init__.py +11 -0
- alma/storage/migrations/versions/v1_0_0.py +373 -0
- alma/storage/pinecone.py +1080 -0
- alma/storage/postgresql.py +1559 -0
- alma/storage/qdrant.py +1306 -0
- alma/storage/sqlite_local.py +504 -60
- alma/testing/__init__.py +46 -0
- alma/testing/factories.py +301 -0
- alma/testing/mocks.py +389 -0
- alma/types.py +62 -14
- alma_memory-0.5.1.dist-info/METADATA +939 -0
- alma_memory-0.5.1.dist-info/RECORD +93 -0
- {alma_memory-0.4.0.dist-info → alma_memory-0.5.1.dist-info}/WHEEL +1 -1
- alma_memory-0.4.0.dist-info/METADATA +0 -488
- alma_memory-0.4.0.dist-info/RECORD +0 -52
- {alma_memory-0.4.0.dist-info → alma_memory-0.5.1.dist-info}/top_level.txt +0 -0
alma/storage/qdrant.py
ADDED
|
@@ -0,0 +1,1306 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ALMA Qdrant Storage Backend.
|
|
3
|
+
|
|
4
|
+
Production-ready storage using Qdrant vector database for efficient
|
|
5
|
+
semantic search with native vector operations.
|
|
6
|
+
|
|
7
|
+
Recommended for:
|
|
8
|
+
- High-performance vector similarity search
|
|
9
|
+
- Self-hosted or cloud Qdrant deployments
|
|
10
|
+
- Scalable production environments
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import logging
|
|
14
|
+
import os
|
|
15
|
+
from datetime import datetime, timezone
|
|
16
|
+
from typing import Any, Dict, List, Optional
|
|
17
|
+
from uuid import uuid4
|
|
18
|
+
|
|
19
|
+
from alma.storage.base import StorageBackend
|
|
20
|
+
from alma.types import (
|
|
21
|
+
AntiPattern,
|
|
22
|
+
DomainKnowledge,
|
|
23
|
+
Heuristic,
|
|
24
|
+
Outcome,
|
|
25
|
+
UserPreference,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
logger = logging.getLogger(__name__)
|
|
29
|
+
|
|
30
|
+
# Try to import qdrant-client
|
|
31
|
+
try:
|
|
32
|
+
from qdrant_client import QdrantClient
|
|
33
|
+
from qdrant_client.http import models
|
|
34
|
+
from qdrant_client.http.exceptions import UnexpectedResponse
|
|
35
|
+
|
|
36
|
+
QDRANT_AVAILABLE = True
|
|
37
|
+
except ImportError:
|
|
38
|
+
QDRANT_AVAILABLE = False
|
|
39
|
+
logger.warning(
|
|
40
|
+
"qdrant-client not installed. Install with: pip install 'alma-memory[qdrant]'"
|
|
41
|
+
)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class QdrantStorage(StorageBackend):
|
|
45
|
+
"""
|
|
46
|
+
Qdrant vector database storage backend.
|
|
47
|
+
|
|
48
|
+
Uses Qdrant for native vector similarity search with efficient indexing.
|
|
49
|
+
|
|
50
|
+
Collections:
|
|
51
|
+
- {prefix}heuristics: Learned strategies and rules
|
|
52
|
+
- {prefix}outcomes: Task execution records
|
|
53
|
+
- {prefix}preferences: User preferences
|
|
54
|
+
- {prefix}domain_knowledge: Domain-specific facts
|
|
55
|
+
- {prefix}anti_patterns: Patterns to avoid
|
|
56
|
+
|
|
57
|
+
Vector search:
|
|
58
|
+
- Uses cosine similarity for semantic search
|
|
59
|
+
- Supports filtering by metadata fields
|
|
60
|
+
"""
|
|
61
|
+
|
|
62
|
+
# Collection names
|
|
63
|
+
HEURISTICS = "heuristics"
|
|
64
|
+
OUTCOMES = "outcomes"
|
|
65
|
+
PREFERENCES = "preferences"
|
|
66
|
+
DOMAIN_KNOWLEDGE = "domain_knowledge"
|
|
67
|
+
ANTI_PATTERNS = "anti_patterns"
|
|
68
|
+
|
|
69
|
+
def __init__(
|
|
70
|
+
self,
|
|
71
|
+
url: str = "http://localhost:6333",
|
|
72
|
+
api_key: Optional[str] = None,
|
|
73
|
+
collection_prefix: str = "alma_",
|
|
74
|
+
embedding_dim: int = 384,
|
|
75
|
+
timeout: int = 30,
|
|
76
|
+
prefer_grpc: bool = False,
|
|
77
|
+
):
|
|
78
|
+
"""
|
|
79
|
+
Initialize Qdrant storage.
|
|
80
|
+
|
|
81
|
+
Args:
|
|
82
|
+
url: Qdrant server URL (default: http://localhost:6333)
|
|
83
|
+
api_key: Optional API key for authentication
|
|
84
|
+
collection_prefix: Prefix for collection names (default: alma_)
|
|
85
|
+
embedding_dim: Dimension of embedding vectors
|
|
86
|
+
timeout: Request timeout in seconds
|
|
87
|
+
prefer_grpc: Use gRPC instead of HTTP
|
|
88
|
+
"""
|
|
89
|
+
if not QDRANT_AVAILABLE:
|
|
90
|
+
raise ImportError(
|
|
91
|
+
"qdrant-client not installed. Install with: pip install 'alma-memory[qdrant]'"
|
|
92
|
+
)
|
|
93
|
+
|
|
94
|
+
self.url = url
|
|
95
|
+
self.api_key = api_key
|
|
96
|
+
self.collection_prefix = collection_prefix
|
|
97
|
+
self.embedding_dim = embedding_dim
|
|
98
|
+
self.timeout = timeout
|
|
99
|
+
|
|
100
|
+
# Initialize client
|
|
101
|
+
self._client = QdrantClient(
|
|
102
|
+
url=url,
|
|
103
|
+
api_key=api_key,
|
|
104
|
+
timeout=timeout,
|
|
105
|
+
prefer_grpc=prefer_grpc,
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
# Initialize collections
|
|
109
|
+
self._init_collections()
|
|
110
|
+
|
|
111
|
+
@classmethod
|
|
112
|
+
def from_config(cls, config: Dict[str, Any]) -> "QdrantStorage":
|
|
113
|
+
"""Create instance from configuration."""
|
|
114
|
+
qdrant_config = config.get("qdrant", {})
|
|
115
|
+
|
|
116
|
+
# Support environment variable expansion
|
|
117
|
+
def get_value(key: str, default: Any = None) -> Any:
|
|
118
|
+
value = qdrant_config.get(key, default)
|
|
119
|
+
if (
|
|
120
|
+
isinstance(value, str)
|
|
121
|
+
and value.startswith("${")
|
|
122
|
+
and value.endswith("}")
|
|
123
|
+
):
|
|
124
|
+
env_var = value[2:-1]
|
|
125
|
+
return os.environ.get(env_var, default)
|
|
126
|
+
return value
|
|
127
|
+
|
|
128
|
+
return cls(
|
|
129
|
+
url=get_value("url", "http://localhost:6333"),
|
|
130
|
+
api_key=get_value("api_key"),
|
|
131
|
+
collection_prefix=get_value("collection_prefix", "alma_"),
|
|
132
|
+
embedding_dim=int(config.get("embedding_dim", 384)),
|
|
133
|
+
timeout=int(get_value("timeout", 30)),
|
|
134
|
+
prefer_grpc=bool(get_value("prefer_grpc", False)),
|
|
135
|
+
)
|
|
136
|
+
|
|
137
|
+
def _collection_name(self, base_name: str) -> str:
|
|
138
|
+
"""Get full collection name with prefix."""
|
|
139
|
+
return f"{self.collection_prefix}{base_name}"
|
|
140
|
+
|
|
141
|
+
def _init_collections(self) -> None:
|
|
142
|
+
"""Initialize all required collections."""
|
|
143
|
+
collections = [
|
|
144
|
+
self.HEURISTICS,
|
|
145
|
+
self.OUTCOMES,
|
|
146
|
+
self.PREFERENCES,
|
|
147
|
+
self.DOMAIN_KNOWLEDGE,
|
|
148
|
+
self.ANTI_PATTERNS,
|
|
149
|
+
]
|
|
150
|
+
|
|
151
|
+
for collection in collections:
|
|
152
|
+
full_name = self._collection_name(collection)
|
|
153
|
+
try:
|
|
154
|
+
# Check if collection exists
|
|
155
|
+
self._client.get_collection(full_name)
|
|
156
|
+
logger.debug(f"Collection {full_name} already exists")
|
|
157
|
+
except (UnexpectedResponse, Exception):
|
|
158
|
+
# Create collection
|
|
159
|
+
self._client.create_collection(
|
|
160
|
+
collection_name=full_name,
|
|
161
|
+
vectors_config=models.VectorParams(
|
|
162
|
+
size=self.embedding_dim,
|
|
163
|
+
distance=models.Distance.COSINE,
|
|
164
|
+
),
|
|
165
|
+
)
|
|
166
|
+
logger.info(f"Created collection: {full_name}")
|
|
167
|
+
|
|
168
|
+
def _generate_id(self) -> str:
|
|
169
|
+
"""Generate a unique ID."""
|
|
170
|
+
return str(uuid4())
|
|
171
|
+
|
|
172
|
+
def _datetime_to_str(self, dt: Optional[datetime]) -> Optional[str]:
|
|
173
|
+
"""Convert datetime to ISO string."""
|
|
174
|
+
if dt is None:
|
|
175
|
+
return None
|
|
176
|
+
return dt.isoformat()
|
|
177
|
+
|
|
178
|
+
def _datetime_to_timestamp(self, dt: Optional[datetime]) -> Optional[float]:
|
|
179
|
+
"""Convert datetime to Unix timestamp for range filtering."""
|
|
180
|
+
if dt is None:
|
|
181
|
+
return None
|
|
182
|
+
return dt.timestamp()
|
|
183
|
+
|
|
184
|
+
def _str_to_datetime(self, s: Optional[str]) -> Optional[datetime]:
|
|
185
|
+
"""Convert ISO string to datetime."""
|
|
186
|
+
if s is None:
|
|
187
|
+
return None
|
|
188
|
+
try:
|
|
189
|
+
return datetime.fromisoformat(s.replace("Z", "+00:00"))
|
|
190
|
+
except (ValueError, AttributeError):
|
|
191
|
+
return None
|
|
192
|
+
|
|
193
|
+
def _get_dummy_vector(self) -> List[float]:
|
|
194
|
+
"""Get a dummy zero vector for items without embeddings."""
|
|
195
|
+
return [0.0] * self.embedding_dim
|
|
196
|
+
|
|
197
|
+
# ==================== WRITE OPERATIONS ====================
|
|
198
|
+
|
|
199
|
+
def save_heuristic(self, heuristic: Heuristic) -> str:
|
|
200
|
+
"""Save a heuristic."""
|
|
201
|
+
collection = self._collection_name(self.HEURISTICS)
|
|
202
|
+
|
|
203
|
+
payload = {
|
|
204
|
+
"id": heuristic.id,
|
|
205
|
+
"agent": heuristic.agent,
|
|
206
|
+
"project_id": heuristic.project_id,
|
|
207
|
+
"condition": heuristic.condition,
|
|
208
|
+
"strategy": heuristic.strategy,
|
|
209
|
+
"confidence": heuristic.confidence,
|
|
210
|
+
"occurrence_count": heuristic.occurrence_count,
|
|
211
|
+
"success_count": heuristic.success_count,
|
|
212
|
+
"last_validated": self._datetime_to_str(heuristic.last_validated),
|
|
213
|
+
"created_at": self._datetime_to_str(heuristic.created_at),
|
|
214
|
+
"metadata": heuristic.metadata or {},
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
vector = (
|
|
218
|
+
heuristic.embedding if heuristic.embedding else self._get_dummy_vector()
|
|
219
|
+
)
|
|
220
|
+
|
|
221
|
+
self._client.upsert(
|
|
222
|
+
collection_name=collection,
|
|
223
|
+
points=[
|
|
224
|
+
models.PointStruct(
|
|
225
|
+
id=heuristic.id,
|
|
226
|
+
vector=vector,
|
|
227
|
+
payload=payload,
|
|
228
|
+
)
|
|
229
|
+
],
|
|
230
|
+
)
|
|
231
|
+
|
|
232
|
+
logger.debug(f"Saved heuristic: {heuristic.id}")
|
|
233
|
+
return heuristic.id
|
|
234
|
+
|
|
235
|
+
def save_outcome(self, outcome: Outcome) -> str:
|
|
236
|
+
"""Save an outcome."""
|
|
237
|
+
collection = self._collection_name(self.OUTCOMES)
|
|
238
|
+
|
|
239
|
+
payload = {
|
|
240
|
+
"id": outcome.id,
|
|
241
|
+
"agent": outcome.agent,
|
|
242
|
+
"project_id": outcome.project_id,
|
|
243
|
+
"task_type": outcome.task_type,
|
|
244
|
+
"task_description": outcome.task_description,
|
|
245
|
+
"success": outcome.success,
|
|
246
|
+
"strategy_used": outcome.strategy_used,
|
|
247
|
+
"duration_ms": outcome.duration_ms,
|
|
248
|
+
"error_message": outcome.error_message,
|
|
249
|
+
"user_feedback": outcome.user_feedback,
|
|
250
|
+
"timestamp": self._datetime_to_str(outcome.timestamp),
|
|
251
|
+
"timestamp_unix": self._datetime_to_timestamp(outcome.timestamp),
|
|
252
|
+
"metadata": outcome.metadata or {},
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
vector = outcome.embedding if outcome.embedding else self._get_dummy_vector()
|
|
256
|
+
|
|
257
|
+
self._client.upsert(
|
|
258
|
+
collection_name=collection,
|
|
259
|
+
points=[
|
|
260
|
+
models.PointStruct(
|
|
261
|
+
id=outcome.id,
|
|
262
|
+
vector=vector,
|
|
263
|
+
payload=payload,
|
|
264
|
+
)
|
|
265
|
+
],
|
|
266
|
+
)
|
|
267
|
+
|
|
268
|
+
logger.debug(f"Saved outcome: {outcome.id}")
|
|
269
|
+
return outcome.id
|
|
270
|
+
|
|
271
|
+
def save_user_preference(self, preference: UserPreference) -> str:
|
|
272
|
+
"""Save a user preference."""
|
|
273
|
+
collection = self._collection_name(self.PREFERENCES)
|
|
274
|
+
|
|
275
|
+
payload = {
|
|
276
|
+
"id": preference.id,
|
|
277
|
+
"user_id": preference.user_id,
|
|
278
|
+
"category": preference.category,
|
|
279
|
+
"preference": preference.preference,
|
|
280
|
+
"source": preference.source,
|
|
281
|
+
"confidence": preference.confidence,
|
|
282
|
+
"timestamp": self._datetime_to_str(preference.timestamp),
|
|
283
|
+
"metadata": preference.metadata or {},
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
# Preferences don't have embeddings, use dummy vector
|
|
287
|
+
vector = self._get_dummy_vector()
|
|
288
|
+
|
|
289
|
+
self._client.upsert(
|
|
290
|
+
collection_name=collection,
|
|
291
|
+
points=[
|
|
292
|
+
models.PointStruct(
|
|
293
|
+
id=preference.id,
|
|
294
|
+
vector=vector,
|
|
295
|
+
payload=payload,
|
|
296
|
+
)
|
|
297
|
+
],
|
|
298
|
+
)
|
|
299
|
+
|
|
300
|
+
logger.debug(f"Saved preference: {preference.id}")
|
|
301
|
+
return preference.id
|
|
302
|
+
|
|
303
|
+
def save_domain_knowledge(self, knowledge: DomainKnowledge) -> str:
|
|
304
|
+
"""Save domain knowledge."""
|
|
305
|
+
collection = self._collection_name(self.DOMAIN_KNOWLEDGE)
|
|
306
|
+
|
|
307
|
+
payload = {
|
|
308
|
+
"id": knowledge.id,
|
|
309
|
+
"agent": knowledge.agent,
|
|
310
|
+
"project_id": knowledge.project_id,
|
|
311
|
+
"domain": knowledge.domain,
|
|
312
|
+
"fact": knowledge.fact,
|
|
313
|
+
"source": knowledge.source,
|
|
314
|
+
"confidence": knowledge.confidence,
|
|
315
|
+
"last_verified": self._datetime_to_str(knowledge.last_verified),
|
|
316
|
+
"metadata": knowledge.metadata or {},
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
vector = (
|
|
320
|
+
knowledge.embedding if knowledge.embedding else self._get_dummy_vector()
|
|
321
|
+
)
|
|
322
|
+
|
|
323
|
+
self._client.upsert(
|
|
324
|
+
collection_name=collection,
|
|
325
|
+
points=[
|
|
326
|
+
models.PointStruct(
|
|
327
|
+
id=knowledge.id,
|
|
328
|
+
vector=vector,
|
|
329
|
+
payload=payload,
|
|
330
|
+
)
|
|
331
|
+
],
|
|
332
|
+
)
|
|
333
|
+
|
|
334
|
+
logger.debug(f"Saved domain knowledge: {knowledge.id}")
|
|
335
|
+
return knowledge.id
|
|
336
|
+
|
|
337
|
+
def save_anti_pattern(self, anti_pattern: AntiPattern) -> str:
|
|
338
|
+
"""Save an anti-pattern."""
|
|
339
|
+
collection = self._collection_name(self.ANTI_PATTERNS)
|
|
340
|
+
|
|
341
|
+
payload = {
|
|
342
|
+
"id": anti_pattern.id,
|
|
343
|
+
"agent": anti_pattern.agent,
|
|
344
|
+
"project_id": anti_pattern.project_id,
|
|
345
|
+
"pattern": anti_pattern.pattern,
|
|
346
|
+
"why_bad": anti_pattern.why_bad,
|
|
347
|
+
"better_alternative": anti_pattern.better_alternative,
|
|
348
|
+
"occurrence_count": anti_pattern.occurrence_count,
|
|
349
|
+
"last_seen": self._datetime_to_str(anti_pattern.last_seen),
|
|
350
|
+
"created_at": self._datetime_to_str(anti_pattern.created_at),
|
|
351
|
+
"metadata": anti_pattern.metadata or {},
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
vector = (
|
|
355
|
+
anti_pattern.embedding
|
|
356
|
+
if anti_pattern.embedding
|
|
357
|
+
else self._get_dummy_vector()
|
|
358
|
+
)
|
|
359
|
+
|
|
360
|
+
self._client.upsert(
|
|
361
|
+
collection_name=collection,
|
|
362
|
+
points=[
|
|
363
|
+
models.PointStruct(
|
|
364
|
+
id=anti_pattern.id,
|
|
365
|
+
vector=vector,
|
|
366
|
+
payload=payload,
|
|
367
|
+
)
|
|
368
|
+
],
|
|
369
|
+
)
|
|
370
|
+
|
|
371
|
+
logger.debug(f"Saved anti-pattern: {anti_pattern.id}")
|
|
372
|
+
return anti_pattern.id
|
|
373
|
+
|
|
374
|
+
# ==================== BATCH WRITE OPERATIONS ====================
|
|
375
|
+
|
|
376
|
+
def save_heuristics(self, heuristics: List[Heuristic]) -> List[str]:
|
|
377
|
+
"""Save multiple heuristics in a batch."""
|
|
378
|
+
if not heuristics:
|
|
379
|
+
return []
|
|
380
|
+
|
|
381
|
+
collection = self._collection_name(self.HEURISTICS)
|
|
382
|
+
|
|
383
|
+
points = []
|
|
384
|
+
for h in heuristics:
|
|
385
|
+
payload = {
|
|
386
|
+
"id": h.id,
|
|
387
|
+
"agent": h.agent,
|
|
388
|
+
"project_id": h.project_id,
|
|
389
|
+
"condition": h.condition,
|
|
390
|
+
"strategy": h.strategy,
|
|
391
|
+
"confidence": h.confidence,
|
|
392
|
+
"occurrence_count": h.occurrence_count,
|
|
393
|
+
"success_count": h.success_count,
|
|
394
|
+
"last_validated": self._datetime_to_str(h.last_validated),
|
|
395
|
+
"created_at": self._datetime_to_str(h.created_at),
|
|
396
|
+
"metadata": h.metadata or {},
|
|
397
|
+
}
|
|
398
|
+
vector = h.embedding if h.embedding else self._get_dummy_vector()
|
|
399
|
+
points.append(models.PointStruct(id=h.id, vector=vector, payload=payload))
|
|
400
|
+
|
|
401
|
+
self._client.upsert(collection_name=collection, points=points)
|
|
402
|
+
logger.debug(f"Batch saved {len(heuristics)} heuristics")
|
|
403
|
+
return [h.id for h in heuristics]
|
|
404
|
+
|
|
405
|
+
def save_outcomes(self, outcomes: List[Outcome]) -> List[str]:
|
|
406
|
+
"""Save multiple outcomes in a batch."""
|
|
407
|
+
if not outcomes:
|
|
408
|
+
return []
|
|
409
|
+
|
|
410
|
+
collection = self._collection_name(self.OUTCOMES)
|
|
411
|
+
|
|
412
|
+
points = []
|
|
413
|
+
for o in outcomes:
|
|
414
|
+
payload = {
|
|
415
|
+
"id": o.id,
|
|
416
|
+
"agent": o.agent,
|
|
417
|
+
"project_id": o.project_id,
|
|
418
|
+
"task_type": o.task_type,
|
|
419
|
+
"task_description": o.task_description,
|
|
420
|
+
"success": o.success,
|
|
421
|
+
"strategy_used": o.strategy_used,
|
|
422
|
+
"duration_ms": o.duration_ms,
|
|
423
|
+
"error_message": o.error_message,
|
|
424
|
+
"user_feedback": o.user_feedback,
|
|
425
|
+
"timestamp": self._datetime_to_str(o.timestamp),
|
|
426
|
+
"timestamp_unix": self._datetime_to_timestamp(o.timestamp),
|
|
427
|
+
"metadata": o.metadata or {},
|
|
428
|
+
}
|
|
429
|
+
vector = o.embedding if o.embedding else self._get_dummy_vector()
|
|
430
|
+
points.append(models.PointStruct(id=o.id, vector=vector, payload=payload))
|
|
431
|
+
|
|
432
|
+
self._client.upsert(collection_name=collection, points=points)
|
|
433
|
+
logger.debug(f"Batch saved {len(outcomes)} outcomes")
|
|
434
|
+
return [o.id for o in outcomes]
|
|
435
|
+
|
|
436
|
+
def save_domain_knowledge_batch(
|
|
437
|
+
self, knowledge_items: List[DomainKnowledge]
|
|
438
|
+
) -> List[str]:
|
|
439
|
+
"""Save multiple domain knowledge items in a batch."""
|
|
440
|
+
if not knowledge_items:
|
|
441
|
+
return []
|
|
442
|
+
|
|
443
|
+
collection = self._collection_name(self.DOMAIN_KNOWLEDGE)
|
|
444
|
+
|
|
445
|
+
points = []
|
|
446
|
+
for k in knowledge_items:
|
|
447
|
+
payload = {
|
|
448
|
+
"id": k.id,
|
|
449
|
+
"agent": k.agent,
|
|
450
|
+
"project_id": k.project_id,
|
|
451
|
+
"domain": k.domain,
|
|
452
|
+
"fact": k.fact,
|
|
453
|
+
"source": k.source,
|
|
454
|
+
"confidence": k.confidence,
|
|
455
|
+
"last_verified": self._datetime_to_str(k.last_verified),
|
|
456
|
+
"metadata": k.metadata or {},
|
|
457
|
+
}
|
|
458
|
+
vector = k.embedding if k.embedding else self._get_dummy_vector()
|
|
459
|
+
points.append(models.PointStruct(id=k.id, vector=vector, payload=payload))
|
|
460
|
+
|
|
461
|
+
self._client.upsert(collection_name=collection, points=points)
|
|
462
|
+
logger.debug(f"Batch saved {len(knowledge_items)} domain knowledge items")
|
|
463
|
+
return [k.id for k in knowledge_items]
|
|
464
|
+
|
|
465
|
+
# ==================== READ OPERATIONS ====================
|
|
466
|
+
|
|
467
|
+
def _build_filter(
|
|
468
|
+
self,
|
|
469
|
+
project_id: Optional[str] = None,
|
|
470
|
+
agent: Optional[str] = None,
|
|
471
|
+
agents: Optional[List[str]] = None,
|
|
472
|
+
user_id: Optional[str] = None,
|
|
473
|
+
task_type: Optional[str] = None,
|
|
474
|
+
domain: Optional[str] = None,
|
|
475
|
+
category: Optional[str] = None,
|
|
476
|
+
success_only: bool = False,
|
|
477
|
+
min_confidence: float = 0.0,
|
|
478
|
+
) -> Optional[Any]: # Returns models.Filter when qdrant-client is installed
|
|
479
|
+
"""Build a Qdrant filter from parameters."""
|
|
480
|
+
conditions = []
|
|
481
|
+
|
|
482
|
+
if project_id:
|
|
483
|
+
conditions.append(
|
|
484
|
+
models.FieldCondition(
|
|
485
|
+
key="project_id",
|
|
486
|
+
match=models.MatchValue(value=project_id),
|
|
487
|
+
)
|
|
488
|
+
)
|
|
489
|
+
|
|
490
|
+
if agent:
|
|
491
|
+
conditions.append(
|
|
492
|
+
models.FieldCondition(
|
|
493
|
+
key="agent",
|
|
494
|
+
match=models.MatchValue(value=agent),
|
|
495
|
+
)
|
|
496
|
+
)
|
|
497
|
+
|
|
498
|
+
if agents:
|
|
499
|
+
conditions.append(
|
|
500
|
+
models.FieldCondition(
|
|
501
|
+
key="agent",
|
|
502
|
+
match=models.MatchAny(any=agents),
|
|
503
|
+
)
|
|
504
|
+
)
|
|
505
|
+
|
|
506
|
+
if user_id:
|
|
507
|
+
conditions.append(
|
|
508
|
+
models.FieldCondition(
|
|
509
|
+
key="user_id",
|
|
510
|
+
match=models.MatchValue(value=user_id),
|
|
511
|
+
)
|
|
512
|
+
)
|
|
513
|
+
|
|
514
|
+
if task_type:
|
|
515
|
+
conditions.append(
|
|
516
|
+
models.FieldCondition(
|
|
517
|
+
key="task_type",
|
|
518
|
+
match=models.MatchValue(value=task_type),
|
|
519
|
+
)
|
|
520
|
+
)
|
|
521
|
+
|
|
522
|
+
if domain:
|
|
523
|
+
conditions.append(
|
|
524
|
+
models.FieldCondition(
|
|
525
|
+
key="domain",
|
|
526
|
+
match=models.MatchValue(value=domain),
|
|
527
|
+
)
|
|
528
|
+
)
|
|
529
|
+
|
|
530
|
+
if category:
|
|
531
|
+
conditions.append(
|
|
532
|
+
models.FieldCondition(
|
|
533
|
+
key="category",
|
|
534
|
+
match=models.MatchValue(value=category),
|
|
535
|
+
)
|
|
536
|
+
)
|
|
537
|
+
|
|
538
|
+
if success_only:
|
|
539
|
+
conditions.append(
|
|
540
|
+
models.FieldCondition(
|
|
541
|
+
key="success",
|
|
542
|
+
match=models.MatchValue(value=True),
|
|
543
|
+
)
|
|
544
|
+
)
|
|
545
|
+
|
|
546
|
+
if min_confidence > 0.0:
|
|
547
|
+
conditions.append(
|
|
548
|
+
models.FieldCondition(
|
|
549
|
+
key="confidence",
|
|
550
|
+
range=models.Range(gte=min_confidence),
|
|
551
|
+
)
|
|
552
|
+
)
|
|
553
|
+
|
|
554
|
+
if not conditions:
|
|
555
|
+
return None
|
|
556
|
+
|
|
557
|
+
return models.Filter(must=conditions)
|
|
558
|
+
|
|
559
|
+
def get_heuristics(
|
|
560
|
+
self,
|
|
561
|
+
project_id: str,
|
|
562
|
+
agent: Optional[str] = None,
|
|
563
|
+
embedding: Optional[List[float]] = None,
|
|
564
|
+
top_k: int = 5,
|
|
565
|
+
min_confidence: float = 0.0,
|
|
566
|
+
) -> List[Heuristic]:
|
|
567
|
+
"""Get heuristics with optional vector search."""
|
|
568
|
+
collection = self._collection_name(self.HEURISTICS)
|
|
569
|
+
|
|
570
|
+
query_filter = self._build_filter(
|
|
571
|
+
project_id=project_id,
|
|
572
|
+
agent=agent,
|
|
573
|
+
min_confidence=min_confidence,
|
|
574
|
+
)
|
|
575
|
+
|
|
576
|
+
if embedding:
|
|
577
|
+
# Vector search
|
|
578
|
+
results = self._client.search(
|
|
579
|
+
collection_name=collection,
|
|
580
|
+
query_vector=embedding,
|
|
581
|
+
query_filter=query_filter,
|
|
582
|
+
limit=top_k,
|
|
583
|
+
with_payload=True,
|
|
584
|
+
)
|
|
585
|
+
else:
|
|
586
|
+
# Scroll without vector search
|
|
587
|
+
results, _ = self._client.scroll(
|
|
588
|
+
collection_name=collection,
|
|
589
|
+
scroll_filter=query_filter,
|
|
590
|
+
limit=top_k,
|
|
591
|
+
with_payload=True,
|
|
592
|
+
with_vectors=False,
|
|
593
|
+
)
|
|
594
|
+
|
|
595
|
+
return [self._point_to_heuristic(r) for r in results]
|
|
596
|
+
|
|
597
|
+
def get_outcomes(
|
|
598
|
+
self,
|
|
599
|
+
project_id: str,
|
|
600
|
+
agent: Optional[str] = None,
|
|
601
|
+
task_type: Optional[str] = None,
|
|
602
|
+
embedding: Optional[List[float]] = None,
|
|
603
|
+
top_k: int = 5,
|
|
604
|
+
success_only: bool = False,
|
|
605
|
+
) -> List[Outcome]:
|
|
606
|
+
"""Get outcomes with optional vector search."""
|
|
607
|
+
collection = self._collection_name(self.OUTCOMES)
|
|
608
|
+
|
|
609
|
+
query_filter = self._build_filter(
|
|
610
|
+
project_id=project_id,
|
|
611
|
+
agent=agent,
|
|
612
|
+
task_type=task_type,
|
|
613
|
+
success_only=success_only,
|
|
614
|
+
)
|
|
615
|
+
|
|
616
|
+
if embedding:
|
|
617
|
+
results = self._client.search(
|
|
618
|
+
collection_name=collection,
|
|
619
|
+
query_vector=embedding,
|
|
620
|
+
query_filter=query_filter,
|
|
621
|
+
limit=top_k,
|
|
622
|
+
with_payload=True,
|
|
623
|
+
)
|
|
624
|
+
else:
|
|
625
|
+
results, _ = self._client.scroll(
|
|
626
|
+
collection_name=collection,
|
|
627
|
+
scroll_filter=query_filter,
|
|
628
|
+
limit=top_k,
|
|
629
|
+
with_payload=True,
|
|
630
|
+
with_vectors=False,
|
|
631
|
+
)
|
|
632
|
+
|
|
633
|
+
return [self._point_to_outcome(r) for r in results]
|
|
634
|
+
|
|
635
|
+
def get_user_preferences(
|
|
636
|
+
self,
|
|
637
|
+
user_id: str,
|
|
638
|
+
category: Optional[str] = None,
|
|
639
|
+
) -> List[UserPreference]:
|
|
640
|
+
"""Get user preferences."""
|
|
641
|
+
collection = self._collection_name(self.PREFERENCES)
|
|
642
|
+
|
|
643
|
+
query_filter = self._build_filter(
|
|
644
|
+
user_id=user_id,
|
|
645
|
+
category=category,
|
|
646
|
+
)
|
|
647
|
+
|
|
648
|
+
results, _ = self._client.scroll(
|
|
649
|
+
collection_name=collection,
|
|
650
|
+
scroll_filter=query_filter,
|
|
651
|
+
limit=100, # Get all preferences for user
|
|
652
|
+
with_payload=True,
|
|
653
|
+
with_vectors=False,
|
|
654
|
+
)
|
|
655
|
+
|
|
656
|
+
return [self._point_to_preference(r) for r in results]
|
|
657
|
+
|
|
658
|
+
def get_domain_knowledge(
|
|
659
|
+
self,
|
|
660
|
+
project_id: str,
|
|
661
|
+
agent: Optional[str] = None,
|
|
662
|
+
domain: Optional[str] = None,
|
|
663
|
+
embedding: Optional[List[float]] = None,
|
|
664
|
+
top_k: int = 5,
|
|
665
|
+
) -> List[DomainKnowledge]:
|
|
666
|
+
"""Get domain knowledge with optional vector search."""
|
|
667
|
+
collection = self._collection_name(self.DOMAIN_KNOWLEDGE)
|
|
668
|
+
|
|
669
|
+
query_filter = self._build_filter(
|
|
670
|
+
project_id=project_id,
|
|
671
|
+
agent=agent,
|
|
672
|
+
domain=domain,
|
|
673
|
+
)
|
|
674
|
+
|
|
675
|
+
if embedding:
|
|
676
|
+
results = self._client.search(
|
|
677
|
+
collection_name=collection,
|
|
678
|
+
query_vector=embedding,
|
|
679
|
+
query_filter=query_filter,
|
|
680
|
+
limit=top_k,
|
|
681
|
+
with_payload=True,
|
|
682
|
+
)
|
|
683
|
+
else:
|
|
684
|
+
results, _ = self._client.scroll(
|
|
685
|
+
collection_name=collection,
|
|
686
|
+
scroll_filter=query_filter,
|
|
687
|
+
limit=top_k,
|
|
688
|
+
with_payload=True,
|
|
689
|
+
with_vectors=False,
|
|
690
|
+
)
|
|
691
|
+
|
|
692
|
+
return [self._point_to_domain_knowledge(r) for r in results]
|
|
693
|
+
|
|
694
|
+
def get_anti_patterns(
|
|
695
|
+
self,
|
|
696
|
+
project_id: str,
|
|
697
|
+
agent: Optional[str] = None,
|
|
698
|
+
embedding: Optional[List[float]] = None,
|
|
699
|
+
top_k: int = 5,
|
|
700
|
+
) -> List[AntiPattern]:
|
|
701
|
+
"""Get anti-patterns with optional vector search."""
|
|
702
|
+
collection = self._collection_name(self.ANTI_PATTERNS)
|
|
703
|
+
|
|
704
|
+
query_filter = self._build_filter(
|
|
705
|
+
project_id=project_id,
|
|
706
|
+
agent=agent,
|
|
707
|
+
)
|
|
708
|
+
|
|
709
|
+
if embedding:
|
|
710
|
+
results = self._client.search(
|
|
711
|
+
collection_name=collection,
|
|
712
|
+
query_vector=embedding,
|
|
713
|
+
query_filter=query_filter,
|
|
714
|
+
limit=top_k,
|
|
715
|
+
with_payload=True,
|
|
716
|
+
)
|
|
717
|
+
else:
|
|
718
|
+
results, _ = self._client.scroll(
|
|
719
|
+
collection_name=collection,
|
|
720
|
+
scroll_filter=query_filter,
|
|
721
|
+
limit=top_k,
|
|
722
|
+
with_payload=True,
|
|
723
|
+
with_vectors=False,
|
|
724
|
+
)
|
|
725
|
+
|
|
726
|
+
return [self._point_to_anti_pattern(r) for r in results]
|
|
727
|
+
|
|
728
|
+
# ==================== MULTI-AGENT MEMORY SHARING ====================
|
|
729
|
+
|
|
730
|
+
def get_heuristics_for_agents(
|
|
731
|
+
self,
|
|
732
|
+
project_id: str,
|
|
733
|
+
agents: List[str],
|
|
734
|
+
embedding: Optional[List[float]] = None,
|
|
735
|
+
top_k: int = 5,
|
|
736
|
+
min_confidence: float = 0.0,
|
|
737
|
+
) -> List[Heuristic]:
|
|
738
|
+
"""Get heuristics from multiple agents using optimized query."""
|
|
739
|
+
if not agents:
|
|
740
|
+
return []
|
|
741
|
+
|
|
742
|
+
collection = self._collection_name(self.HEURISTICS)
|
|
743
|
+
|
|
744
|
+
query_filter = self._build_filter(
|
|
745
|
+
project_id=project_id,
|
|
746
|
+
agents=agents,
|
|
747
|
+
min_confidence=min_confidence,
|
|
748
|
+
)
|
|
749
|
+
|
|
750
|
+
if embedding:
|
|
751
|
+
results = self._client.search(
|
|
752
|
+
collection_name=collection,
|
|
753
|
+
query_vector=embedding,
|
|
754
|
+
query_filter=query_filter,
|
|
755
|
+
limit=top_k * len(agents),
|
|
756
|
+
with_payload=True,
|
|
757
|
+
)
|
|
758
|
+
else:
|
|
759
|
+
results, _ = self._client.scroll(
|
|
760
|
+
collection_name=collection,
|
|
761
|
+
scroll_filter=query_filter,
|
|
762
|
+
limit=top_k * len(agents),
|
|
763
|
+
with_payload=True,
|
|
764
|
+
with_vectors=False,
|
|
765
|
+
)
|
|
766
|
+
|
|
767
|
+
return [self._point_to_heuristic(r) for r in results]
|
|
768
|
+
|
|
769
|
+
def get_outcomes_for_agents(
|
|
770
|
+
self,
|
|
771
|
+
project_id: str,
|
|
772
|
+
agents: List[str],
|
|
773
|
+
task_type: Optional[str] = None,
|
|
774
|
+
embedding: Optional[List[float]] = None,
|
|
775
|
+
top_k: int = 5,
|
|
776
|
+
success_only: bool = False,
|
|
777
|
+
) -> List[Outcome]:
|
|
778
|
+
"""Get outcomes from multiple agents using optimized query."""
|
|
779
|
+
if not agents:
|
|
780
|
+
return []
|
|
781
|
+
|
|
782
|
+
collection = self._collection_name(self.OUTCOMES)
|
|
783
|
+
|
|
784
|
+
query_filter = self._build_filter(
|
|
785
|
+
project_id=project_id,
|
|
786
|
+
agents=agents,
|
|
787
|
+
task_type=task_type,
|
|
788
|
+
success_only=success_only,
|
|
789
|
+
)
|
|
790
|
+
|
|
791
|
+
if embedding:
|
|
792
|
+
results = self._client.search(
|
|
793
|
+
collection_name=collection,
|
|
794
|
+
query_vector=embedding,
|
|
795
|
+
query_filter=query_filter,
|
|
796
|
+
limit=top_k * len(agents),
|
|
797
|
+
with_payload=True,
|
|
798
|
+
)
|
|
799
|
+
else:
|
|
800
|
+
results, _ = self._client.scroll(
|
|
801
|
+
collection_name=collection,
|
|
802
|
+
scroll_filter=query_filter,
|
|
803
|
+
limit=top_k * len(agents),
|
|
804
|
+
with_payload=True,
|
|
805
|
+
with_vectors=False,
|
|
806
|
+
)
|
|
807
|
+
|
|
808
|
+
return [self._point_to_outcome(r) for r in results]
|
|
809
|
+
|
|
810
|
+
def get_domain_knowledge_for_agents(
    self,
    project_id: str,
    agents: List[str],
    domain: Optional[str] = None,
    embedding: Optional[List[float]] = None,
    top_k: int = 5,
) -> List[DomainKnowledge]:
    """Fetch domain knowledge shared by any of the given agents.

    Uses vector similarity when an embedding is provided and a
    filtered scroll otherwise.
    """
    if not agents:
        return []

    target = self._collection_name(self.DOMAIN_KNOWLEDGE)
    flt = self._build_filter(
        project_id=project_id,
        agents=agents,
        domain=domain,
    )
    # Scale the limit with the number of agents so each can contribute.
    limit = top_k * len(agents)

    if embedding:
        points = self._client.search(
            collection_name=target,
            query_vector=embedding,
            query_filter=flt,
            limit=limit,
            with_payload=True,
        )
    else:
        points, _ = self._client.scroll(
            collection_name=target,
            scroll_filter=flt,
            limit=limit,
            with_payload=True,
            with_vectors=False,
        )

    return [self._point_to_domain_knowledge(p) for p in points]
|
|
848
|
+
|
|
849
|
+
def get_anti_patterns_for_agents(
    self,
    project_id: str,
    agents: List[str],
    embedding: Optional[List[float]] = None,
    top_k: int = 5,
) -> List[AntiPattern]:
    """Fetch anti-patterns recorded by any of the given agents.

    Uses vector similarity when an embedding is provided and a
    filtered scroll otherwise.
    """
    if not agents:
        return []

    target = self._collection_name(self.ANTI_PATTERNS)
    flt = self._build_filter(
        project_id=project_id,
        agents=agents,
    )
    # Scale the limit with the number of agents so each can contribute.
    limit = top_k * len(agents)

    if embedding:
        points = self._client.search(
            collection_name=target,
            query_vector=embedding,
            query_filter=flt,
            limit=limit,
            with_payload=True,
        )
    else:
        points, _ = self._client.scroll(
            collection_name=target,
            scroll_filter=flt,
            limit=limit,
            with_payload=True,
            with_vectors=False,
        )

    return [self._point_to_anti_pattern(p) for p in points]
|
|
885
|
+
|
|
886
|
+
# ==================== UPDATE OPERATIONS ====================
|
|
887
|
+
|
|
888
|
+
def update_heuristic(
    self,
    heuristic_id: str,
    updates: Dict[str, Any],
) -> bool:
    """Update selected payload fields of a stored heuristic.

    Args:
        heuristic_id: Point ID of the heuristic to modify.
        updates: Mapping of field name to new value. datetime values
            are serialized to strings before being written.

    Returns:
        True if the payload was written; False when there is nothing
        to update or the write fails.
    """
    if not updates:
        return False

    collection = self._collection_name(self.HEURISTICS)

    # Serialize datetimes; everything else (including metadata dicts)
    # is stored as-is. The original code special-cased "metadata" but
    # handled it identically to the default branch.
    payload_updates: Dict[str, Any] = {
        key: self._datetime_to_str(value) if isinstance(value, datetime) else value
        for key, value in updates.items()
    }

    try:
        self._client.set_payload(
            collection_name=collection,
            payload=payload_updates,
            points=[heuristic_id],
        )
        return True
    except Exception as e:
        logger.warning(f"Failed to update heuristic {heuristic_id}: {e}")
        return False
|
|
919
|
+
|
|
920
|
+
def increment_heuristic_occurrence(
    self,
    heuristic_id: str,
    success: bool,
) -> bool:
    """Bump a heuristic's occurrence (and optionally success) counters.

    Also refreshes the last_validated timestamp. Returns False when
    the heuristic does not exist or the write fails.
    """
    collection = self._collection_name(self.HEURISTICS)

    try:
        # Read the current counters for this point.
        hits = self._client.retrieve(
            collection_name=collection,
            ids=[heuristic_id],
            with_payload=True,
        )
        if not hits:
            return False

        current = hits[0].payload or {}
        occurrences = int(current.get("occurrence_count") or 0) + 1
        successes = int(current.get("success_count") or 0)
        if success:
            successes += 1

        self._client.set_payload(
            collection_name=collection,
            payload={
                "occurrence_count": occurrences,
                "success_count": successes,
                "last_validated": self._datetime_to_str(datetime.now(timezone.utc)),
            },
            points=[heuristic_id],
        )
        return True
    except Exception as e:
        logger.warning(f"Failed to increment heuristic {heuristic_id}: {e}")
        return False
|
|
958
|
+
|
|
959
|
+
def update_heuristic_confidence(
    self,
    heuristic_id: str,
    new_confidence: float,
) -> bool:
    """Overwrite the stored confidence of a single heuristic.

    Returns True on success, False when the payload write fails.
    """
    collection = self._collection_name(self.HEURISTICS)

    try:
        self._client.set_payload(
            collection_name=collection,
            payload={"confidence": new_confidence},
            points=[heuristic_id],
        )
    except Exception as e:
        logger.warning(f"Failed to update confidence for {heuristic_id}: {e}")
        return False
    return True
|
|
977
|
+
|
|
978
|
+
def update_knowledge_confidence(
    self,
    knowledge_id: str,
    new_confidence: float,
) -> bool:
    """Overwrite the stored confidence of a domain-knowledge record.

    Returns True on success, False when the payload write fails.
    """
    collection = self._collection_name(self.DOMAIN_KNOWLEDGE)

    try:
        self._client.set_payload(
            collection_name=collection,
            payload={"confidence": new_confidence},
            points=[knowledge_id],
        )
    except Exception as e:
        logger.warning(f"Failed to update confidence for {knowledge_id}: {e}")
        return False
    return True
|
|
996
|
+
|
|
997
|
+
# ==================== DELETE OPERATIONS ====================
|
|
998
|
+
|
|
999
|
+
def delete_heuristic(self, heuristic_id: str) -> bool:
    """Remove a single heuristic point; True when the delete succeeds."""
    collection = self._collection_name(self.HEURISTICS)
    selector = models.PointIdsList(points=[heuristic_id])

    try:
        self._client.delete(
            collection_name=collection,
            points_selector=selector,
        )
        logger.debug(f"Deleted heuristic: {heuristic_id}")
        return True
    except Exception as e:
        logger.warning(f"Failed to delete heuristic {heuristic_id}: {e}")
        return False
|
|
1013
|
+
|
|
1014
|
+
def delete_outcome(self, outcome_id: str) -> bool:
    """Remove a single outcome point; True when the delete succeeds."""
    collection = self._collection_name(self.OUTCOMES)
    selector = models.PointIdsList(points=[outcome_id])

    try:
        self._client.delete(
            collection_name=collection,
            points_selector=selector,
        )
        logger.debug(f"Deleted outcome: {outcome_id}")
        return True
    except Exception as e:
        logger.warning(f"Failed to delete outcome {outcome_id}: {e}")
        return False
|
|
1028
|
+
|
|
1029
|
+
def delete_domain_knowledge(self, knowledge_id: str) -> bool:
    """Remove a single domain-knowledge point; True when the delete succeeds."""
    collection = self._collection_name(self.DOMAIN_KNOWLEDGE)
    selector = models.PointIdsList(points=[knowledge_id])

    try:
        self._client.delete(
            collection_name=collection,
            points_selector=selector,
        )
        logger.debug(f"Deleted domain knowledge: {knowledge_id}")
        return True
    except Exception as e:
        logger.warning(f"Failed to delete domain knowledge {knowledge_id}: {e}")
        return False
|
|
1043
|
+
|
|
1044
|
+
def delete_anti_pattern(self, anti_pattern_id: str) -> bool:
    """Remove a single anti-pattern point; True when the delete succeeds."""
    collection = self._collection_name(self.ANTI_PATTERNS)
    selector = models.PointIdsList(points=[anti_pattern_id])

    try:
        self._client.delete(
            collection_name=collection,
            points_selector=selector,
        )
        logger.debug(f"Deleted anti-pattern: {anti_pattern_id}")
        return True
    except Exception as e:
        logger.warning(f"Failed to delete anti-pattern {anti_pattern_id}: {e}")
        return False
|
|
1058
|
+
|
|
1059
|
+
def delete_outcomes_older_than(
    self,
    project_id: str,
    older_than: datetime,
    agent: Optional[str] = None,
) -> int:
    """Purge outcomes recorded before *older_than*.

    Args:
        project_id: Project whose outcomes are pruned.
        older_than: Cut-off; anything strictly older is removed.
        agent: Optionally restrict the purge to one agent.

    Returns:
        The number of points that matched the filter.
    """
    collection = self._collection_name(self.OUTCOMES)

    # The range comparison runs against the Unix-timestamp mirror field.
    must = [
        models.FieldCondition(
            key="project_id",
            match=models.MatchValue(value=project_id),
        ),
        models.FieldCondition(
            key="timestamp_unix",
            range=models.Range(lt=self._datetime_to_timestamp(older_than)),
        ),
    ]
    if agent:
        must.append(
            models.FieldCondition(
                key="agent",
                match=models.MatchValue(value=agent),
            )
        )
    delete_filter = models.Filter(must=must)

    # Count first so we can report how many points the delete removed.
    matched = self._client.count(
        collection_name=collection,
        count_filter=delete_filter,
        exact=True,
    ).count

    self._client.delete(
        collection_name=collection,
        points_selector=models.FilterSelector(filter=delete_filter),
    )

    logger.info(f"Deleted {matched} old outcomes")
    return matched
|
|
1105
|
+
|
|
1106
|
+
def delete_low_confidence_heuristics(
    self,
    project_id: str,
    below_confidence: float,
    agent: Optional[str] = None,
) -> int:
    """Drop every heuristic whose confidence is below the threshold.

    Args:
        project_id: Project whose heuristics are pruned.
        below_confidence: Points with confidence strictly less than
            this value are removed.
        agent: Optionally restrict the purge to one agent.

    Returns:
        The number of points that matched the filter.
    """
    collection = self._collection_name(self.HEURISTICS)

    must = [
        models.FieldCondition(
            key="project_id",
            match=models.MatchValue(value=project_id),
        ),
        models.FieldCondition(
            key="confidence",
            range=models.Range(lt=below_confidence),
        ),
    ]
    if agent:
        must.append(
            models.FieldCondition(
                key="agent",
                match=models.MatchValue(value=agent),
            )
        )
    delete_filter = models.Filter(must=must)

    # Count first so we can report how many points the delete removed.
    matched = self._client.count(
        collection_name=collection,
        count_filter=delete_filter,
        exact=True,
    ).count

    self._client.delete(
        collection_name=collection,
        points_selector=models.FilterSelector(filter=delete_filter),
    )

    logger.info(f"Deleted {matched} low-confidence heuristics")
    return matched
|
|
1152
|
+
|
|
1153
|
+
# ==================== STATS ====================
|
|
1154
|
+
|
|
1155
|
+
def get_stats(
    self,
    project_id: str,
    agent: Optional[str] = None,
) -> Dict[str, Any]:
    """Return record counts per collection plus a grand total.

    Args:
        project_id: Project whose records are counted.
        agent: Optional agent filter applied to every project-scoped
            collection.

    Returns:
        Mapping with one "<name>_count" entry per collection, a
        "total_count" sum, and identifying metadata. Collections that
        cannot be counted report 0 rather than raising.
    """
    stats: Dict[str, Any] = {
        "project_id": project_id,
        "agent": agent,
        "storage_type": "qdrant",
        "url": self.url,
    }

    # The filter depends only on project/agent, so build it once
    # instead of once per collection iteration.
    query_filter = self._build_filter(
        project_id=project_id,
        agent=agent,
    )

    collections_map = {
        "heuristics": self.HEURISTICS,
        "outcomes": self.OUTCOMES,
        "domain_knowledge": self.DOMAIN_KNOWLEDGE,
        "anti_patterns": self.ANTI_PATTERNS,
    }

    for stat_name, collection_base in collections_map.items():
        collection = self._collection_name(collection_base)
        try:
            stats[f"{stat_name}_count"] = self._client.count(
                collection_name=collection,
                count_filter=query_filter,
                exact=True,
            ).count
        except Exception:
            # A missing/unreadable collection simply counts as zero.
            stats[f"{stat_name}_count"] = 0

    # Preferences are user-scoped, not project-scoped, so they are
    # counted without the project filter.
    try:
        prefs_collection = self._collection_name(self.PREFERENCES)
        stats["preferences_count"] = self._client.count(
            collection_name=prefs_collection,
            exact=True,
        ).count
    except Exception:
        stats["preferences_count"] = 0

    stats["total_count"] = sum(
        int(v) for k, v in stats.items() if k.endswith("_count")
    )

    return stats
|
|
1209
|
+
|
|
1210
|
+
# ==================== HELPERS ====================
|
|
1211
|
+
|
|
1212
|
+
def _point_to_heuristic(self, point: Any) -> Heuristic:
    """Rehydrate a Heuristic from a stored Qdrant point payload."""
    data = point.payload
    return Heuristic(
        id=data["id"],
        agent=data["agent"],
        project_id=data["project_id"],
        condition=data["condition"],
        strategy=data["strategy"],
        # Missing numeric fields default to zero.
        confidence=data.get("confidence") or 0.0,
        occurrence_count=data.get("occurrence_count") or 0,
        success_count=data.get("success_count") or 0,
        # Timestamps fall back to "now" when absent or unparsable.
        last_validated=self._str_to_datetime(data.get("last_validated"))
        or datetime.now(timezone.utc),
        created_at=self._str_to_datetime(data.get("created_at"))
        or datetime.now(timezone.utc),
        # scroll() results carry no vector attribute, hence getattr.
        embedding=getattr(point, "vector", None),
        metadata=data.get("metadata") or {},
    )
|
|
1231
|
+
|
|
1232
|
+
def _point_to_outcome(self, point: Any) -> Outcome:
    """Rehydrate an Outcome from a stored Qdrant point payload."""
    data = point.payload
    return Outcome(
        id=data["id"],
        agent=data["agent"],
        project_id=data["project_id"],
        task_type=data.get("task_type") or "general",
        task_description=data["task_description"],
        success=bool(data.get("success")),
        strategy_used=data.get("strategy_used") or "",
        # These three are genuinely optional and stay None when unset.
        duration_ms=data.get("duration_ms"),
        error_message=data.get("error_message"),
        user_feedback=data.get("user_feedback"),
        # Timestamp falls back to "now" when absent or unparsable.
        timestamp=self._str_to_datetime(data.get("timestamp"))
        or datetime.now(timezone.utc),
        # scroll() results carry no vector attribute, hence getattr.
        embedding=getattr(point, "vector", None),
        metadata=data.get("metadata") or {},
    )
|
|
1251
|
+
|
|
1252
|
+
def _point_to_preference(self, point: Any) -> UserPreference:
    """Rehydrate a UserPreference from a stored Qdrant point payload.

    Confidence uses an explicit None check so a legitimately stored
    value of 0.0 is preserved; the previous `or 1.0` fallback coerced
    any falsy confidence to 1.0.
    """
    payload = point.payload
    confidence = payload.get("confidence")
    return UserPreference(
        id=payload["id"],
        user_id=payload["user_id"],
        category=payload.get("category") or "general",
        preference=payload["preference"],
        source=payload.get("source") or "unknown",
        confidence=1.0 if confidence is None else confidence,
        # Timestamp falls back to "now" when absent or unparsable.
        timestamp=self._str_to_datetime(payload.get("timestamp"))
        or datetime.now(timezone.utc),
        metadata=payload.get("metadata") or {},
    )
|
|
1266
|
+
|
|
1267
|
+
def _point_to_domain_knowledge(self, point: Any) -> DomainKnowledge:
    """Rehydrate a DomainKnowledge record from a stored Qdrant point.

    Confidence uses an explicit None check so a legitimately stored
    value of 0.0 is preserved; the previous `or 1.0` fallback coerced
    any falsy confidence to 1.0.
    """
    payload = point.payload
    confidence = payload.get("confidence")
    return DomainKnowledge(
        id=payload["id"],
        agent=payload["agent"],
        project_id=payload["project_id"],
        domain=payload.get("domain") or "general",
        fact=payload["fact"],
        source=payload.get("source") or "unknown",
        confidence=1.0 if confidence is None else confidence,
        # Timestamp falls back to "now" when absent or unparsable.
        last_verified=self._str_to_datetime(payload.get("last_verified"))
        or datetime.now(timezone.utc),
        # scroll() results carry no vector attribute, hence getattr.
        embedding=getattr(point, "vector", None),
        metadata=payload.get("metadata") or {},
    )
|
|
1283
|
+
|
|
1284
|
+
def _point_to_anti_pattern(self, point: Any) -> AntiPattern:
    """Rehydrate an AntiPattern from a stored Qdrant point payload."""
    data = point.payload
    return AntiPattern(
        id=data["id"],
        agent=data["agent"],
        project_id=data["project_id"],
        pattern=data["pattern"],
        why_bad=data.get("why_bad") or "",
        better_alternative=data.get("better_alternative") or "",
        # An anti-pattern exists because it occurred, so default to 1.
        occurrence_count=data.get("occurrence_count") or 1,
        # Timestamps fall back to "now" when absent or unparsable.
        last_seen=self._str_to_datetime(data.get("last_seen"))
        or datetime.now(timezone.utc),
        created_at=self._str_to_datetime(data.get("created_at"))
        or datetime.now(timezone.utc),
        # scroll() results carry no vector attribute, hence getattr.
        embedding=getattr(point, "vector", None),
        metadata=data.get("metadata") or {},
    )
|
|
1302
|
+
|
|
1303
|
+
def close(self) -> None:
    """Shut down the underlying Qdrant client connection, if one exists."""
    client = self._client
    if client:
        client.close()
|