alma-memory 0.4.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff compares the contents of two package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- alma/__init__.py +88 -44
- alma/confidence/__init__.py +1 -1
- alma/confidence/engine.py +92 -58
- alma/confidence/types.py +34 -14
- alma/config/loader.py +3 -2
- alma/consolidation/__init__.py +23 -0
- alma/consolidation/engine.py +678 -0
- alma/consolidation/prompts.py +84 -0
- alma/core.py +15 -15
- alma/domains/__init__.py +6 -6
- alma/domains/factory.py +12 -9
- alma/domains/schemas.py +17 -3
- alma/domains/types.py +8 -4
- alma/events/__init__.py +75 -0
- alma/events/emitter.py +284 -0
- alma/events/storage_mixin.py +246 -0
- alma/events/types.py +126 -0
- alma/events/webhook.py +425 -0
- alma/exceptions.py +49 -0
- alma/extraction/__init__.py +31 -0
- alma/extraction/auto_learner.py +264 -0
- alma/extraction/extractor.py +420 -0
- alma/graph/__init__.py +81 -0
- alma/graph/backends/__init__.py +18 -0
- alma/graph/backends/memory.py +236 -0
- alma/graph/backends/neo4j.py +417 -0
- alma/graph/base.py +159 -0
- alma/graph/extraction.py +198 -0
- alma/graph/store.py +860 -0
- alma/harness/__init__.py +4 -4
- alma/harness/base.py +18 -9
- alma/harness/domains.py +27 -11
- alma/initializer/__init__.py +1 -1
- alma/initializer/initializer.py +51 -43
- alma/initializer/types.py +25 -17
- alma/integration/__init__.py +9 -9
- alma/integration/claude_agents.py +10 -10
- alma/integration/helena.py +32 -22
- alma/integration/victor.py +57 -33
- alma/learning/__init__.py +27 -27
- alma/learning/forgetting.py +198 -148
- alma/learning/heuristic_extractor.py +40 -24
- alma/learning/protocols.py +62 -14
- alma/learning/validation.py +7 -2
- alma/mcp/__init__.py +4 -4
- alma/mcp/__main__.py +2 -1
- alma/mcp/resources.py +17 -16
- alma/mcp/server.py +102 -44
- alma/mcp/tools.py +174 -37
- alma/progress/__init__.py +3 -3
- alma/progress/tracker.py +26 -20
- alma/progress/types.py +8 -12
- alma/py.typed +0 -0
- alma/retrieval/__init__.py +11 -11
- alma/retrieval/cache.py +20 -21
- alma/retrieval/embeddings.py +4 -4
- alma/retrieval/engine.py +114 -35
- alma/retrieval/scoring.py +73 -63
- alma/session/__init__.py +2 -2
- alma/session/manager.py +5 -5
- alma/session/types.py +5 -4
- alma/storage/__init__.py +41 -0
- alma/storage/azure_cosmos.py +101 -31
- alma/storage/base.py +157 -4
- alma/storage/chroma.py +1443 -0
- alma/storage/file_based.py +56 -20
- alma/storage/pinecone.py +1080 -0
- alma/storage/postgresql.py +1452 -0
- alma/storage/qdrant.py +1306 -0
- alma/storage/sqlite_local.py +376 -31
- alma/types.py +62 -14
- alma_memory-0.5.0.dist-info/METADATA +905 -0
- alma_memory-0.5.0.dist-info/RECORD +76 -0
- {alma_memory-0.4.0.dist-info → alma_memory-0.5.0.dist-info}/WHEEL +1 -1
- alma_memory-0.4.0.dist-info/METADATA +0 -488
- alma_memory-0.4.0.dist-info/RECORD +0 -52
- {alma_memory-0.4.0.dist-info → alma_memory-0.5.0.dist-info}/top_level.txt +0 -0
alma/storage/azure_cosmos.py
CHANGED
@@ -17,20 +17,18 @@ Configuration (config.yaml):
     embedding_dim: 384
 """
 
-import json
 import logging
 from datetime import datetime, timezone
-from typing import
-from dataclasses import asdict
+from typing import Any, Dict, List, Optional
 
+from alma.storage.base import StorageBackend
 from alma.types import (
+    AntiPattern,
+    DomainKnowledge,
     Heuristic,
     Outcome,
     UserPreference,
-    DomainKnowledge,
-    AntiPattern,
 )
-from alma.storage.base import StorageBackend
 
 logger = logging.getLogger(__name__)
 
@@ -39,6 +37,7 @@ try:
     from azure.cosmos import CosmosClient, PartitionKey, exceptions
     from azure.cosmos.container import ContainerProxy
     from azure.cosmos.database import DatabaseProxy
+
     AZURE_COSMOS_AVAILABLE = True
 except ImportError:
     AZURE_COSMOS_AVAILABLE = False
@@ -49,8 +48,7 @@ except ImportError:
     ContainerProxy = Any # type: ignore
     DatabaseProxy = Any # type: ignore
     logger.warning(
-        "azure-cosmos package not installed. "
-        "Install with: pip install azure-cosmos"
+        "azure-cosmos package not installed. Install with: pip install azure-cosmos"
     )
 
 
@@ -111,9 +109,7 @@ class AzureCosmosStorage(StorageBackend):
 
         # Get or create database
         if create_if_not_exists:
-            self.database = self.client.create_database_if_not_exists(
-                id=database_name
-            )
+            self.database = self.client.create_database_if_not_exists(id=database_name)
             self._init_containers()
         else:
             self.database = self.client.get_database_client(database_name)
@@ -195,7 +191,7 @@ class AzureCosmosStorage(StorageBackend):
         if cfg["vector_indexes"] and cfg["vector_path"]:
            # Exclude vector path from regular indexing
            indexing_policy["excludedPaths"].append(
-                {"path": f
+                {"path": f"{cfg['vector_path']}/*"}
            )
 
         # Vector embedding policy for DiskANN
@@ -250,12 +246,14 @@ class AzureCosmosStorage(StorageBackend):
             "confidence": heuristic.confidence,
             "occurrence_count": heuristic.occurrence_count,
             "success_count": heuristic.success_count,
-            "last_validated":
-
-
-
-
-
+            "last_validated": (
+                heuristic.last_validated.isoformat()
+                if heuristic.last_validated
+                else None
+            ),
+            "created_at": (
+                heuristic.created_at.isoformat() if heuristic.created_at else None
+            ),
             "metadata": heuristic.metadata or {},
             "embedding": heuristic.embedding,
             "type": "heuristic",
@@ -301,9 +299,9 @@ class AzureCosmosStorage(StorageBackend):
             "preference": preference.preference,
             "source": preference.source,
             "confidence": preference.confidence,
-            "timestamp":
-
-
+            "timestamp": (
+                preference.timestamp.isoformat() if preference.timestamp else None
+            ),
             "metadata": preference.metadata or {},
             "type": "preference",
         }
@@ -324,9 +322,9 @@ class AzureCosmosStorage(StorageBackend):
             "fact": knowledge.fact,
             "source": knowledge.source,
             "confidence": knowledge.confidence,
-            "last_verified":
-
-
+            "last_verified": (
+                knowledge.last_verified.isoformat() if knowledge.last_verified else None
+            ),
             "metadata": knowledge.metadata or {},
             "embedding": knowledge.embedding,
             "type": "domain_knowledge",
@@ -348,12 +346,12 @@ class AzureCosmosStorage(StorageBackend):
             "why_bad": anti_pattern.why_bad,
             "better_alternative": anti_pattern.better_alternative,
             "occurrence_count": anti_pattern.occurrence_count,
-            "last_seen":
-
-
-            "created_at":
-
-
+            "last_seen": (
+                anti_pattern.last_seen.isoformat() if anti_pattern.last_seen else None
+            ),
+            "created_at": (
+                anti_pattern.created_at.isoformat() if anti_pattern.created_at else None
+            ),
             "metadata": anti_pattern.metadata or {},
             "embedding": anti_pattern.embedding,
             "type": "anti_pattern",
@@ -668,7 +666,7 @@ class AzureCosmosStorage(StorageBackend):
             return False
 
         doc = items[0]
-
+        doc["project_id"]
 
         # Apply updates
         for key, value in updates.items():
@@ -710,6 +708,78 @@ class AzureCosmosStorage(StorageBackend):
         container.replace_item(item=heuristic_id, body=doc)
         return True
 
+    def update_heuristic_confidence(
+        self,
+        heuristic_id: str,
+        new_confidence: float,
+    ) -> bool:
+        """
+        Update confidence score for a heuristic.
+
+        Note: This requires a cross-partition query since we only have the ID.
+        For better performance, consider using update_heuristic() with the
+        project_id if available, which enables point reads.
+        """
+        container = self._get_container("heuristics")
+
+        # Find the heuristic (cross-partition query required without project_id)
+        query = "SELECT * FROM c WHERE c.id = @id"
+        items = list(
+            container.query_items(
+                query=query,
+                parameters=[{"name": "@id", "value": heuristic_id}],
+                enable_cross_partition_query=True,
+            )
+        )
+
+        if not items:
+            return False
+
+        doc = items[0]
+        doc["confidence"] = new_confidence
+
+        container.replace_item(item=heuristic_id, body=doc)
+        logger.debug(
+            f"Updated heuristic confidence: {heuristic_id} -> {new_confidence}"
+        )
+        return True
+
+    def update_knowledge_confidence(
+        self,
+        knowledge_id: str,
+        new_confidence: float,
+    ) -> bool:
+        """
+        Update confidence score for domain knowledge.
+
+        Note: This requires a cross-partition query since we only have the ID.
+        For better performance when project_id is known, fetch the document
+        directly using point read and update via save_domain_knowledge().
+        """
+        container = self._get_container("knowledge")
+
+        # Find the knowledge item (cross-partition query required without project_id)
+        query = "SELECT * FROM c WHERE c.id = @id"
+        items = list(
+            container.query_items(
+                query=query,
+                parameters=[{"name": "@id", "value": knowledge_id}],
+                enable_cross_partition_query=True,
+            )
+        )
+
+        if not items:
+            return False
+
+        doc = items[0]
+        doc["confidence"] = new_confidence
+
+        container.replace_item(item=knowledge_id, body=doc)
+        logger.debug(
+            f"Updated knowledge confidence: {knowledge_id} -> {new_confidence}"
+        )
+        return True
+
     # ==================== DELETE OPERATIONS ====================
 
     def delete_outcomes_older_than(
alma/storage/base.py
CHANGED
@@ -5,16 +5,15 @@ Abstract base class that all storage backends must implement.
 """
 
 from abc import ABC, abstractmethod
-from typing import Optional, List, Dict, Any
 from datetime import datetime
+from typing import Any, Dict, List, Optional
 
 from alma.types import (
+    AntiPattern,
+    DomainKnowledge,
     Heuristic,
     Outcome,
     UserPreference,
-    DomainKnowledge,
-    AntiPattern,
-    MemoryType,
 )
 
 
@@ -55,6 +54,22 @@ class StorageBackend(ABC):
         """Save an anti-pattern, return its ID."""
         pass
 
+    # ==================== BATCH WRITE OPERATIONS ====================
+
+    def save_heuristics(self, heuristics: List[Heuristic]) -> List[str]:
+        """Save multiple heuristics in a batch. Default implementation calls save_heuristic in a loop."""
+        return [self.save_heuristic(h) for h in heuristics]
+
+    def save_outcomes(self, outcomes: List[Outcome]) -> List[str]:
+        """Save multiple outcomes in a batch. Default implementation calls save_outcome in a loop."""
+        return [self.save_outcome(o) for o in outcomes]
+
+    def save_domain_knowledge_batch(
+        self, knowledge_items: List[DomainKnowledge]
+    ) -> List[str]:
+        """Save multiple domain knowledge items in a batch. Default implementation calls save_domain_knowledge in a loop."""
+        return [self.save_domain_knowledge(k) for k in knowledge_items]
+
     # ==================== READ OPERATIONS ====================
 
     @abstractmethod
@@ -339,6 +354,144 @@ class StorageBackend(ABC):
         """
         pass
 
+    # ==================== MULTI-AGENT MEMORY SHARING ====================
+
+    def get_heuristics_for_agents(
+        self,
+        project_id: str,
+        agents: List[str],
+        embedding: Optional[List[float]] = None,
+        top_k: int = 5,
+        min_confidence: float = 0.0,
+    ) -> List[Heuristic]:
+        """
+        Get heuristics from multiple agents in one call.
+
+        This enables multi-agent memory sharing where an agent can
+        read memories from agents it inherits from.
+
+        Args:
+            project_id: Project to query
+            agents: List of agent names to query
+            embedding: Query embedding for semantic search
+            top_k: Max results to return per agent
+            min_confidence: Minimum confidence threshold
+
+        Returns:
+            List of matching heuristics from all specified agents
+        """
+        # Default implementation: query each agent individually
+        results = []
+        for agent in agents:
+            agent_heuristics = self.get_heuristics(
+                project_id=project_id,
+                agent=agent,
+                embedding=embedding,
+                top_k=top_k,
+                min_confidence=min_confidence,
+            )
+            results.extend(agent_heuristics)
+        return results
+
+    def get_outcomes_for_agents(
+        self,
+        project_id: str,
+        agents: List[str],
+        task_type: Optional[str] = None,
+        embedding: Optional[List[float]] = None,
+        top_k: int = 5,
+        success_only: bool = False,
+    ) -> List[Outcome]:
+        """
+        Get outcomes from multiple agents in one call.
+
+        Args:
+            project_id: Project to query
+            agents: List of agent names to query
+            task_type: Filter by task type
+            embedding: Query embedding for semantic search
+            top_k: Max results to return per agent
+            success_only: Only return successful outcomes
+
+        Returns:
+            List of matching outcomes from all specified agents
+        """
+        results = []
+        for agent in agents:
+            agent_outcomes = self.get_outcomes(
+                project_id=project_id,
+                agent=agent,
+                task_type=task_type,
+                embedding=embedding,
+                top_k=top_k,
+                success_only=success_only,
+            )
+            results.extend(agent_outcomes)
+        return results
+
+    def get_domain_knowledge_for_agents(
+        self,
+        project_id: str,
+        agents: List[str],
+        domain: Optional[str] = None,
+        embedding: Optional[List[float]] = None,
+        top_k: int = 5,
+    ) -> List[DomainKnowledge]:
+        """
+        Get domain knowledge from multiple agents in one call.
+
+        Args:
+            project_id: Project to query
+            agents: List of agent names to query
+            domain: Filter by domain
+            embedding: Query embedding for semantic search
+            top_k: Max results to return per agent
+
+        Returns:
+            List of matching domain knowledge from all specified agents
+        """
+        results = []
+        for agent in agents:
+            agent_knowledge = self.get_domain_knowledge(
+                project_id=project_id,
+                agent=agent,
+                domain=domain,
+                embedding=embedding,
+                top_k=top_k,
+            )
+            results.extend(agent_knowledge)
+        return results
+
+    def get_anti_patterns_for_agents(
+        self,
+        project_id: str,
+        agents: List[str],
+        embedding: Optional[List[float]] = None,
+        top_k: int = 5,
+    ) -> List[AntiPattern]:
+        """
+        Get anti-patterns from multiple agents in one call.
+
+        Args:
+            project_id: Project to query
+            agents: List of agent names to query
+            embedding: Query embedding for semantic search
+            top_k: Max results to return per agent
+
+        Returns:
+            List of matching anti-patterns from all specified agents
+        """
+        results = []
+        for agent in agents:
+            agent_patterns = self.get_anti_patterns(
+                project_id=project_id,
+                agent=agent,
+                embedding=embedding,
+                top_k=top_k,
+            )
+            results.extend(agent_patterns)
+        return results
+
     # ==================== STATS ====================
 
     @abstractmethod