alma-memory 0.5.0__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alma/__init__.py +33 -1
- alma/core.py +124 -16
- alma/extraction/auto_learner.py +4 -3
- alma/graph/__init__.py +26 -1
- alma/graph/backends/__init__.py +14 -0
- alma/graph/backends/kuzu.py +624 -0
- alma/graph/backends/memgraph.py +432 -0
- alma/integration/claude_agents.py +22 -10
- alma/learning/protocols.py +3 -3
- alma/mcp/tools.py +9 -11
- alma/observability/__init__.py +84 -0
- alma/observability/config.py +302 -0
- alma/observability/logging.py +424 -0
- alma/observability/metrics.py +583 -0
- alma/observability/tracing.py +440 -0
- alma/retrieval/engine.py +65 -4
- alma/storage/__init__.py +29 -0
- alma/storage/azure_cosmos.py +343 -132
- alma/storage/base.py +58 -0
- alma/storage/constants.py +103 -0
- alma/storage/file_based.py +3 -8
- alma/storage/migrations/__init__.py +21 -0
- alma/storage/migrations/base.py +321 -0
- alma/storage/migrations/runner.py +323 -0
- alma/storage/migrations/version_stores.py +337 -0
- alma/storage/migrations/versions/__init__.py +11 -0
- alma/storage/migrations/versions/v1_0_0.py +373 -0
- alma/storage/postgresql.py +185 -78
- alma/storage/sqlite_local.py +149 -50
- alma/testing/__init__.py +46 -0
- alma/testing/factories.py +301 -0
- alma/testing/mocks.py +389 -0
- {alma_memory-0.5.0.dist-info → alma_memory-0.5.1.dist-info}/METADATA +42 -8
- {alma_memory-0.5.0.dist-info → alma_memory-0.5.1.dist-info}/RECORD +36 -19
- {alma_memory-0.5.0.dist-info → alma_memory-0.5.1.dist-info}/WHEEL +0 -0
- {alma_memory-0.5.0.dist-info → alma_memory-0.5.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,432 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ALMA Graph Memory - Memgraph Backend.
|
|
3
|
+
|
|
4
|
+
Memgraph implementation of the GraphBackend interface.
|
|
5
|
+
Memgraph is compatible with Neo4j's Bolt protocol, so the neo4j Python driver works with it.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
from datetime import datetime, timezone
|
|
11
|
+
from typing import Any, Dict, List, Optional
|
|
12
|
+
|
|
13
|
+
from alma.graph.base import GraphBackend
|
|
14
|
+
from alma.graph.store import Entity, Relationship
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class MemgraphBackend(GraphBackend):
    """Memgraph graph database backend.

    Memgraph is an in-memory graph database that speaks Neo4j's Bolt
    protocol, so this backend reuses the ``neo4j`` Python driver
    (``pip install neo4j``).

    Example:
        backend = MemgraphBackend(
            uri="bolt://localhost:7687",
            username="",
            password="",
        )
        backend.add_entity(entity)
        backend.close()

    Note:
        Memgraph typically runs without authentication by default, though
        it can be configured. Pass empty strings for username/password
        when authentication is disabled.
    """
|
|
41
|
+
|
|
42
|
+
def __init__(
|
|
43
|
+
self,
|
|
44
|
+
uri: str = "bolt://localhost:7687",
|
|
45
|
+
username: str = "",
|
|
46
|
+
password: str = "",
|
|
47
|
+
database: str = "memgraph",
|
|
48
|
+
):
|
|
49
|
+
"""
|
|
50
|
+
Initialize Memgraph connection.
|
|
51
|
+
|
|
52
|
+
Args:
|
|
53
|
+
uri: Memgraph connection URI (bolt://)
|
|
54
|
+
username: Database username (empty if auth disabled)
|
|
55
|
+
password: Database password (empty if auth disabled)
|
|
56
|
+
database: Database name (default: "memgraph")
|
|
57
|
+
"""
|
|
58
|
+
self.uri = uri
|
|
59
|
+
self.username = username
|
|
60
|
+
self.password = password
|
|
61
|
+
self.database = database
|
|
62
|
+
self._driver = None
|
|
63
|
+
|
|
64
|
+
def _get_driver(self):
|
|
65
|
+
"""Lazy initialization of Memgraph driver (via neo4j package)."""
|
|
66
|
+
if self._driver is None:
|
|
67
|
+
try:
|
|
68
|
+
from neo4j import GraphDatabase
|
|
69
|
+
|
|
70
|
+
# Memgraph uses the same Bolt protocol as Neo4j
|
|
71
|
+
if self.username and self.password:
|
|
72
|
+
self._driver = GraphDatabase.driver(
|
|
73
|
+
self.uri,
|
|
74
|
+
auth=(self.username, self.password),
|
|
75
|
+
)
|
|
76
|
+
else:
|
|
77
|
+
# No authentication
|
|
78
|
+
self._driver = GraphDatabase.driver(self.uri)
|
|
79
|
+
except ImportError as err:
|
|
80
|
+
raise ImportError(
|
|
81
|
+
"neo4j package required for Memgraph graph backend. "
|
|
82
|
+
"Install with: pip install neo4j"
|
|
83
|
+
) from err
|
|
84
|
+
return self._driver
|
|
85
|
+
|
|
86
|
+
def _run_query(self, query: str, parameters: Optional[Dict] = None) -> List[Dict]:
|
|
87
|
+
"""Execute a Cypher query."""
|
|
88
|
+
driver = self._get_driver()
|
|
89
|
+
# Memgraph doesn't use the database parameter in the same way as Neo4j
|
|
90
|
+
# Most Memgraph setups use a single database
|
|
91
|
+
with driver.session() as session:
|
|
92
|
+
result = session.run(query, parameters or {})
|
|
93
|
+
return [dict(record) for record in result]
|
|
94
|
+
|
|
95
|
+
def add_entity(self, entity: Entity) -> str:
|
|
96
|
+
"""Add or update an entity in Memgraph."""
|
|
97
|
+
# Extract project_id and agent from properties if present
|
|
98
|
+
properties = entity.properties.copy()
|
|
99
|
+
project_id = properties.pop("project_id", None)
|
|
100
|
+
agent = properties.pop("agent", None)
|
|
101
|
+
|
|
102
|
+
query = """
|
|
103
|
+
MERGE (e:Entity {id: $id})
|
|
104
|
+
SET e.name = $name,
|
|
105
|
+
e.entity_type = $entity_type,
|
|
106
|
+
e.properties = $properties,
|
|
107
|
+
e.created_at = $created_at
|
|
108
|
+
"""
|
|
109
|
+
params = {
|
|
110
|
+
"id": entity.id,
|
|
111
|
+
"name": entity.name,
|
|
112
|
+
"entity_type": entity.entity_type,
|
|
113
|
+
"properties": json.dumps(properties),
|
|
114
|
+
"created_at": entity.created_at.isoformat(),
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
# Add optional fields if present
|
|
118
|
+
if project_id:
|
|
119
|
+
query += ", e.project_id = $project_id"
|
|
120
|
+
params["project_id"] = project_id
|
|
121
|
+
if agent:
|
|
122
|
+
query += ", e.agent = $agent"
|
|
123
|
+
params["agent"] = agent
|
|
124
|
+
|
|
125
|
+
query += " RETURN e.id as id"
|
|
126
|
+
|
|
127
|
+
result = self._run_query(query, params)
|
|
128
|
+
return result[0]["id"] if result else entity.id
|
|
129
|
+
|
|
130
|
+
def add_relationship(self, relationship: Relationship) -> str:
|
|
131
|
+
"""Add or update a relationship in Memgraph."""
|
|
132
|
+
# Sanitize relationship type for Cypher (remove special characters)
|
|
133
|
+
rel_type = (
|
|
134
|
+
relationship.relation_type.replace("-", "_").replace(" ", "_").upper()
|
|
135
|
+
)
|
|
136
|
+
|
|
137
|
+
query = f"""
|
|
138
|
+
MATCH (source:Entity {{id: $source_id}})
|
|
139
|
+
MATCH (target:Entity {{id: $target_id}})
|
|
140
|
+
MERGE (source)-[r:{rel_type}]->(target)
|
|
141
|
+
SET r.id = $id,
|
|
142
|
+
r.properties = $properties,
|
|
143
|
+
r.confidence = $confidence,
|
|
144
|
+
r.created_at = $created_at
|
|
145
|
+
RETURN r.id as id
|
|
146
|
+
"""
|
|
147
|
+
result = self._run_query(
|
|
148
|
+
query,
|
|
149
|
+
{
|
|
150
|
+
"id": relationship.id,
|
|
151
|
+
"source_id": relationship.source_id,
|
|
152
|
+
"target_id": relationship.target_id,
|
|
153
|
+
"properties": json.dumps(relationship.properties),
|
|
154
|
+
"confidence": relationship.confidence,
|
|
155
|
+
"created_at": relationship.created_at.isoformat(),
|
|
156
|
+
},
|
|
157
|
+
)
|
|
158
|
+
return result[0]["id"] if result else relationship.id
|
|
159
|
+
|
|
160
|
+
def get_entity(self, entity_id: str) -> Optional[Entity]:
|
|
161
|
+
"""Get an entity by ID."""
|
|
162
|
+
query = """
|
|
163
|
+
MATCH (e:Entity {id: $id})
|
|
164
|
+
RETURN e.id as id, e.name as name, e.entity_type as entity_type,
|
|
165
|
+
e.properties as properties, e.created_at as created_at,
|
|
166
|
+
e.project_id as project_id, e.agent as agent
|
|
167
|
+
"""
|
|
168
|
+
result = self._run_query(query, {"id": entity_id})
|
|
169
|
+
if not result:
|
|
170
|
+
return None
|
|
171
|
+
|
|
172
|
+
r = result[0]
|
|
173
|
+
properties = json.loads(r["properties"]) if r["properties"] else {}
|
|
174
|
+
|
|
175
|
+
# Add project_id and agent back to properties if present
|
|
176
|
+
if r.get("project_id"):
|
|
177
|
+
properties["project_id"] = r["project_id"]
|
|
178
|
+
if r.get("agent"):
|
|
179
|
+
properties["agent"] = r["agent"]
|
|
180
|
+
|
|
181
|
+
return Entity(
|
|
182
|
+
id=r["id"],
|
|
183
|
+
name=r["name"],
|
|
184
|
+
entity_type=r["entity_type"],
|
|
185
|
+
properties=properties,
|
|
186
|
+
created_at=(
|
|
187
|
+
datetime.fromisoformat(r["created_at"])
|
|
188
|
+
if r["created_at"]
|
|
189
|
+
else datetime.now(timezone.utc)
|
|
190
|
+
),
|
|
191
|
+
)
|
|
192
|
+
|
|
193
|
+
def get_entities(
|
|
194
|
+
self,
|
|
195
|
+
entity_type: Optional[str] = None,
|
|
196
|
+
project_id: Optional[str] = None,
|
|
197
|
+
agent: Optional[str] = None,
|
|
198
|
+
limit: int = 100,
|
|
199
|
+
) -> List[Entity]:
|
|
200
|
+
"""Get entities with optional filtering."""
|
|
201
|
+
conditions = []
|
|
202
|
+
params: Dict[str, Any] = {"limit": limit}
|
|
203
|
+
|
|
204
|
+
if entity_type:
|
|
205
|
+
conditions.append("e.entity_type = $entity_type")
|
|
206
|
+
params["entity_type"] = entity_type
|
|
207
|
+
if project_id:
|
|
208
|
+
conditions.append("e.project_id = $project_id")
|
|
209
|
+
params["project_id"] = project_id
|
|
210
|
+
if agent:
|
|
211
|
+
conditions.append("e.agent = $agent")
|
|
212
|
+
params["agent"] = agent
|
|
213
|
+
|
|
214
|
+
where_clause = f"WHERE {' AND '.join(conditions)}" if conditions else ""
|
|
215
|
+
|
|
216
|
+
query = f"""
|
|
217
|
+
MATCH (e:Entity)
|
|
218
|
+
{where_clause}
|
|
219
|
+
RETURN e.id as id, e.name as name, e.entity_type as entity_type,
|
|
220
|
+
e.properties as properties, e.created_at as created_at,
|
|
221
|
+
e.project_id as project_id, e.agent as agent
|
|
222
|
+
LIMIT $limit
|
|
223
|
+
"""
|
|
224
|
+
|
|
225
|
+
results = self._run_query(query, params)
|
|
226
|
+
entities = []
|
|
227
|
+
for r in results:
|
|
228
|
+
properties = json.loads(r["properties"]) if r["properties"] else {}
|
|
229
|
+
if r.get("project_id"):
|
|
230
|
+
properties["project_id"] = r["project_id"]
|
|
231
|
+
if r.get("agent"):
|
|
232
|
+
properties["agent"] = r["agent"]
|
|
233
|
+
|
|
234
|
+
entities.append(
|
|
235
|
+
Entity(
|
|
236
|
+
id=r["id"],
|
|
237
|
+
name=r["name"],
|
|
238
|
+
entity_type=r["entity_type"],
|
|
239
|
+
properties=properties,
|
|
240
|
+
created_at=(
|
|
241
|
+
datetime.fromisoformat(r["created_at"])
|
|
242
|
+
if r["created_at"]
|
|
243
|
+
else datetime.now(timezone.utc)
|
|
244
|
+
),
|
|
245
|
+
)
|
|
246
|
+
)
|
|
247
|
+
return entities
|
|
248
|
+
|
|
249
|
+
def get_relationships(self, entity_id: str) -> List[Relationship]:
|
|
250
|
+
"""Get all relationships for an entity (both directions)."""
|
|
251
|
+
query = """
|
|
252
|
+
MATCH (e:Entity {id: $entity_id})-[r]-(other:Entity)
|
|
253
|
+
RETURN r.id as id,
|
|
254
|
+
CASE WHEN startNode(r).id = $entity_id THEN e.id ELSE other.id END as source_id,
|
|
255
|
+
CASE WHEN endNode(r).id = $entity_id THEN e.id ELSE other.id END as target_id,
|
|
256
|
+
type(r) as relation_type, r.properties as properties,
|
|
257
|
+
r.confidence as confidence, r.created_at as created_at
|
|
258
|
+
"""
|
|
259
|
+
|
|
260
|
+
results = self._run_query(query, {"entity_id": entity_id})
|
|
261
|
+
relationships = []
|
|
262
|
+
for r in results:
|
|
263
|
+
rel_id = (
|
|
264
|
+
r["id"] or f"{r['source_id']}-{r['relation_type']}-{r['target_id']}"
|
|
265
|
+
)
|
|
266
|
+
relationships.append(
|
|
267
|
+
Relationship(
|
|
268
|
+
id=rel_id,
|
|
269
|
+
source_id=r["source_id"],
|
|
270
|
+
target_id=r["target_id"],
|
|
271
|
+
relation_type=r["relation_type"],
|
|
272
|
+
properties=json.loads(r["properties"]) if r["properties"] else {},
|
|
273
|
+
confidence=r["confidence"] or 1.0,
|
|
274
|
+
created_at=(
|
|
275
|
+
datetime.fromisoformat(r["created_at"])
|
|
276
|
+
if r["created_at"]
|
|
277
|
+
else datetime.now(timezone.utc)
|
|
278
|
+
),
|
|
279
|
+
)
|
|
280
|
+
)
|
|
281
|
+
return relationships
|
|
282
|
+
|
|
283
|
+
def search_entities(
|
|
284
|
+
self,
|
|
285
|
+
query: str,
|
|
286
|
+
embedding: Optional[List[float]] = None,
|
|
287
|
+
top_k: int = 10,
|
|
288
|
+
) -> List[Entity]:
|
|
289
|
+
"""
|
|
290
|
+
Search for entities by name.
|
|
291
|
+
|
|
292
|
+
Note: Vector similarity search requires Memgraph MAGE with vector operations.
|
|
293
|
+
Falls back to text search if embedding is provided but vector index
|
|
294
|
+
is not available.
|
|
295
|
+
"""
|
|
296
|
+
# For now, we do text-based search
|
|
297
|
+
# Vector search can be added when Memgraph MAGE is configured
|
|
298
|
+
cypher = """
|
|
299
|
+
MATCH (e:Entity)
|
|
300
|
+
WHERE toLower(e.name) CONTAINS toLower($query)
|
|
301
|
+
RETURN e.id as id, e.name as name, e.entity_type as entity_type,
|
|
302
|
+
e.properties as properties, e.created_at as created_at,
|
|
303
|
+
e.project_id as project_id, e.agent as agent
|
|
304
|
+
LIMIT $limit
|
|
305
|
+
"""
|
|
306
|
+
|
|
307
|
+
results = self._run_query(cypher, {"query": query, "limit": top_k})
|
|
308
|
+
entities = []
|
|
309
|
+
for r in results:
|
|
310
|
+
properties = json.loads(r["properties"]) if r["properties"] else {}
|
|
311
|
+
if r.get("project_id"):
|
|
312
|
+
properties["project_id"] = r["project_id"]
|
|
313
|
+
if r.get("agent"):
|
|
314
|
+
properties["agent"] = r["agent"]
|
|
315
|
+
|
|
316
|
+
entities.append(
|
|
317
|
+
Entity(
|
|
318
|
+
id=r["id"],
|
|
319
|
+
name=r["name"],
|
|
320
|
+
entity_type=r["entity_type"],
|
|
321
|
+
properties=properties,
|
|
322
|
+
created_at=(
|
|
323
|
+
datetime.fromisoformat(r["created_at"])
|
|
324
|
+
if r["created_at"]
|
|
325
|
+
else datetime.now(timezone.utc)
|
|
326
|
+
),
|
|
327
|
+
)
|
|
328
|
+
)
|
|
329
|
+
return entities
|
|
330
|
+
|
|
331
|
+
def delete_entity(self, entity_id: str) -> bool:
|
|
332
|
+
"""Delete an entity and its relationships."""
|
|
333
|
+
query = """
|
|
334
|
+
MATCH (e:Entity {id: $id})
|
|
335
|
+
DETACH DELETE e
|
|
336
|
+
RETURN count(e) as deleted
|
|
337
|
+
"""
|
|
338
|
+
result = self._run_query(query, {"id": entity_id})
|
|
339
|
+
return result[0]["deleted"] > 0 if result else False
|
|
340
|
+
|
|
341
|
+
def delete_relationship(self, relationship_id: str) -> bool:
|
|
342
|
+
"""Delete a specific relationship by ID."""
|
|
343
|
+
query = """
|
|
344
|
+
MATCH ()-[r]-()
|
|
345
|
+
WHERE r.id = $id
|
|
346
|
+
DELETE r
|
|
347
|
+
RETURN count(r) as deleted
|
|
348
|
+
"""
|
|
349
|
+
result = self._run_query(query, {"id": relationship_id})
|
|
350
|
+
return result[0]["deleted"] > 0 if result else False
|
|
351
|
+
|
|
352
|
+
def close(self) -> None:
|
|
353
|
+
"""Close the Memgraph driver connection."""
|
|
354
|
+
if self._driver:
|
|
355
|
+
self._driver.close()
|
|
356
|
+
self._driver = None
|
|
357
|
+
|
|
358
|
+
# Additional methods for compatibility with existing GraphStore API
|
|
359
|
+
|
|
360
|
+
def find_entities(
|
|
361
|
+
self,
|
|
362
|
+
name: Optional[str] = None,
|
|
363
|
+
entity_type: Optional[str] = None,
|
|
364
|
+
limit: int = 10,
|
|
365
|
+
) -> List[Entity]:
|
|
366
|
+
"""
|
|
367
|
+
Find entities by name or type.
|
|
368
|
+
|
|
369
|
+
This method provides compatibility with the existing GraphStore API.
|
|
370
|
+
"""
|
|
371
|
+
if name:
|
|
372
|
+
return self.search_entities(query=name, top_k=limit)
|
|
373
|
+
|
|
374
|
+
return self.get_entities(entity_type=entity_type, limit=limit)
|
|
375
|
+
|
|
376
|
+
def get_relationships_directional(
|
|
377
|
+
self,
|
|
378
|
+
entity_id: str,
|
|
379
|
+
direction: str = "both",
|
|
380
|
+
relation_type: Optional[str] = None,
|
|
381
|
+
) -> List[Relationship]:
|
|
382
|
+
"""
|
|
383
|
+
Get relationships for an entity with direction control.
|
|
384
|
+
|
|
385
|
+
This method provides compatibility with the existing GraphStore API.
|
|
386
|
+
|
|
387
|
+
Args:
|
|
388
|
+
entity_id: The entity ID.
|
|
389
|
+
direction: "outgoing", "incoming", or "both".
|
|
390
|
+
relation_type: Optional filter by relationship type.
|
|
391
|
+
|
|
392
|
+
Returns:
|
|
393
|
+
List of matching relationships.
|
|
394
|
+
"""
|
|
395
|
+
if direction == "outgoing":
|
|
396
|
+
pattern = "(e)-[r]->(other)"
|
|
397
|
+
elif direction == "incoming":
|
|
398
|
+
pattern = "(e)<-[r]-(other)"
|
|
399
|
+
else:
|
|
400
|
+
pattern = "(e)-[r]-(other)"
|
|
401
|
+
|
|
402
|
+
type_filter = f":{relation_type}" if relation_type else ""
|
|
403
|
+
|
|
404
|
+
query = f"""
|
|
405
|
+
MATCH (e:Entity {{id: $entity_id}}){pattern.replace("[r]", f"[r{type_filter}]")}
|
|
406
|
+
RETURN r.id as id, e.id as source_id, other.id as target_id,
|
|
407
|
+
type(r) as relation_type, r.properties as properties,
|
|
408
|
+
r.confidence as confidence, r.created_at as created_at
|
|
409
|
+
"""
|
|
410
|
+
|
|
411
|
+
results = self._run_query(query, {"entity_id": entity_id})
|
|
412
|
+
relationships = []
|
|
413
|
+
for r in results:
|
|
414
|
+
rel_id = (
|
|
415
|
+
r["id"] or f"{r['source_id']}-{r['relation_type']}-{r['target_id']}"
|
|
416
|
+
)
|
|
417
|
+
relationships.append(
|
|
418
|
+
Relationship(
|
|
419
|
+
id=rel_id,
|
|
420
|
+
source_id=r["source_id"],
|
|
421
|
+
target_id=r["target_id"],
|
|
422
|
+
relation_type=r["relation_type"],
|
|
423
|
+
properties=json.loads(r["properties"]) if r["properties"] else {},
|
|
424
|
+
confidence=r["confidence"] or 1.0,
|
|
425
|
+
created_at=(
|
|
426
|
+
datetime.fromisoformat(r["created_at"])
|
|
427
|
+
if r["created_at"]
|
|
428
|
+
else datetime.now(timezone.utc)
|
|
429
|
+
),
|
|
430
|
+
)
|
|
431
|
+
)
|
|
432
|
+
return relationships
|
|
@@ -212,8 +212,8 @@ class ClaudeAgentHooks:
|
|
|
212
212
|
self.harness.post_run(harness_context, run_result)
|
|
213
213
|
return True
|
|
214
214
|
else:
|
|
215
|
-
# Direct ALMA learning
|
|
216
|
-
|
|
215
|
+
# Direct ALMA learning - now returns Outcome object
|
|
216
|
+
self.alma.learn(
|
|
217
217
|
agent=self.agent_name,
|
|
218
218
|
task=context.task_description,
|
|
219
219
|
outcome="success" if outcome.success else "failure",
|
|
@@ -223,6 +223,7 @@ class ClaudeAgentHooks:
|
|
|
223
223
|
error_message=outcome.error_message,
|
|
224
224
|
feedback=outcome.feedback,
|
|
225
225
|
)
|
|
226
|
+
return True
|
|
226
227
|
|
|
227
228
|
def format_memories_for_prompt(
|
|
228
229
|
self,
|
|
@@ -312,15 +313,26 @@ class ClaudeAgentHooks:
|
|
|
312
313
|
source: How this was discovered
|
|
313
314
|
|
|
314
315
|
Returns:
|
|
315
|
-
True if knowledge was added
|
|
316
|
+
True if knowledge was added
|
|
317
|
+
|
|
318
|
+
Raises:
|
|
319
|
+
ScopeViolationError: If domain is not within agent's scope
|
|
316
320
|
"""
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
321
|
+
from alma.exceptions import ScopeViolationError
|
|
322
|
+
|
|
323
|
+
try:
|
|
324
|
+
self.alma.add_domain_knowledge(
|
|
325
|
+
agent=self.agent_name,
|
|
326
|
+
domain=domain,
|
|
327
|
+
fact=fact,
|
|
328
|
+
source=source,
|
|
329
|
+
)
|
|
330
|
+
return True
|
|
331
|
+
except ScopeViolationError:
|
|
332
|
+
logger.warning(
|
|
333
|
+
f"[{self.agent_name}] Scope violation: cannot add knowledge in domain '{domain}'"
|
|
334
|
+
)
|
|
335
|
+
return False
|
|
324
336
|
|
|
325
337
|
|
|
326
338
|
class AgentIntegration:
|
alma/learning/protocols.py
CHANGED
|
@@ -67,7 +67,7 @@ class LearningProtocol:
|
|
|
67
67
|
duration_ms: Optional[int] = None,
|
|
68
68
|
error_message: Optional[str] = None,
|
|
69
69
|
feedback: Optional[str] = None,
|
|
70
|
-
) ->
|
|
70
|
+
) -> Outcome:
|
|
71
71
|
"""
|
|
72
72
|
Learn from a task outcome.
|
|
73
73
|
|
|
@@ -85,7 +85,7 @@ class LearningProtocol:
|
|
|
85
85
|
feedback: User feedback
|
|
86
86
|
|
|
87
87
|
Returns:
|
|
88
|
-
|
|
88
|
+
The created Outcome record
|
|
89
89
|
"""
|
|
90
90
|
# Validate agent has a scope (warn but don't block)
|
|
91
91
|
scope = self.scopes.get(agent)
|
|
@@ -133,7 +133,7 @@ class LearningProtocol:
|
|
|
133
133
|
error=error_message,
|
|
134
134
|
)
|
|
135
135
|
|
|
136
|
-
return
|
|
136
|
+
return outcome_record
|
|
137
137
|
|
|
138
138
|
def _maybe_create_heuristic(
|
|
139
139
|
self,
|
alma/mcp/tools.py
CHANGED
|
@@ -172,7 +172,7 @@ def alma_learn(
|
|
|
172
172
|
return {"success": False, "error": "strategy_used cannot be empty"}
|
|
173
173
|
|
|
174
174
|
try:
|
|
175
|
-
|
|
175
|
+
outcome_record = alma.learn(
|
|
176
176
|
agent=agent,
|
|
177
177
|
task=task,
|
|
178
178
|
outcome=outcome,
|
|
@@ -185,10 +185,14 @@ def alma_learn(
|
|
|
185
185
|
|
|
186
186
|
return {
|
|
187
187
|
"success": True,
|
|
188
|
-
"learned":
|
|
189
|
-
"
|
|
190
|
-
"
|
|
191
|
-
|
|
188
|
+
"learned": True,
|
|
189
|
+
"outcome": {
|
|
190
|
+
"id": outcome_record.id,
|
|
191
|
+
"agent": outcome_record.agent,
|
|
192
|
+
"task_type": outcome_record.task_type,
|
|
193
|
+
"success": outcome_record.success,
|
|
194
|
+
},
|
|
195
|
+
"message": "Outcome recorded successfully",
|
|
192
196
|
}
|
|
193
197
|
|
|
194
198
|
except Exception as e:
|
|
@@ -290,12 +294,6 @@ def alma_add_knowledge(
|
|
|
290
294
|
source=source,
|
|
291
295
|
)
|
|
292
296
|
|
|
293
|
-
if knowledge is None:
|
|
294
|
-
return {
|
|
295
|
-
"success": False,
|
|
296
|
-
"error": f"Agent '{agent}' not allowed to learn in domain '{domain}'",
|
|
297
|
-
}
|
|
298
|
-
|
|
299
297
|
return {
|
|
300
298
|
"success": True,
|
|
301
299
|
"knowledge": {
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ALMA Observability Module.
|
|
3
|
+
|
|
4
|
+
Provides comprehensive observability features including:
|
|
5
|
+
- OpenTelemetry integration for distributed tracing
|
|
6
|
+
- Structured JSON logging
|
|
7
|
+
- Metrics collection (counters, histograms, gauges)
|
|
8
|
+
- Performance monitoring
|
|
9
|
+
|
|
10
|
+
This module follows the OpenTelemetry specification and supports
|
|
11
|
+
integration with common observability backends (Jaeger, Prometheus,
|
|
12
|
+
DataDog, etc.).
|
|
13
|
+
|
|
14
|
+
Usage:
|
|
15
|
+
from alma.observability import (
|
|
16
|
+
get_tracer,
|
|
17
|
+
get_meter,
|
|
18
|
+
get_logger,
|
|
19
|
+
configure_observability,
|
|
20
|
+
ALMAMetrics,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
# Initialize observability (typically at app startup)
|
|
24
|
+
configure_observability(
|
|
25
|
+
service_name="alma-memory",
|
|
26
|
+
enable_tracing=True,
|
|
27
|
+
enable_metrics=True,
|
|
28
|
+
log_format="json",
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
# Use in code
|
|
32
|
+
tracer = get_tracer(__name__)
|
|
33
|
+
with tracer.start_as_current_span("my_operation"):
|
|
34
|
+
# ... your code
|
|
35
|
+
pass
|
|
36
|
+
"""
|
|
37
|
+
|
|
38
|
+
from alma.observability.config import (
|
|
39
|
+
ObservabilityConfig,
|
|
40
|
+
configure_observability,
|
|
41
|
+
shutdown_observability,
|
|
42
|
+
)
|
|
43
|
+
from alma.observability.logging import (
|
|
44
|
+
JSONFormatter,
|
|
45
|
+
StructuredLogger,
|
|
46
|
+
get_logger,
|
|
47
|
+
setup_logging,
|
|
48
|
+
)
|
|
49
|
+
from alma.observability.metrics import (
|
|
50
|
+
ALMAMetrics,
|
|
51
|
+
MetricsCollector,
|
|
52
|
+
get_meter,
|
|
53
|
+
get_metrics,
|
|
54
|
+
)
|
|
55
|
+
from alma.observability.tracing import (
|
|
56
|
+
SpanKind,
|
|
57
|
+
TracingContext,
|
|
58
|
+
get_tracer,
|
|
59
|
+
trace_async,
|
|
60
|
+
trace_method,
|
|
61
|
+
)
|
|
62
|
+
|
|
63
|
+
__all__ = [
|
|
64
|
+
# Configuration
|
|
65
|
+
"ObservabilityConfig",
|
|
66
|
+
"configure_observability",
|
|
67
|
+
"shutdown_observability",
|
|
68
|
+
# Logging
|
|
69
|
+
"JSONFormatter",
|
|
70
|
+
"StructuredLogger",
|
|
71
|
+
"get_logger",
|
|
72
|
+
"setup_logging",
|
|
73
|
+
# Metrics
|
|
74
|
+
"ALMAMetrics",
|
|
75
|
+
"MetricsCollector",
|
|
76
|
+
"get_meter",
|
|
77
|
+
"get_metrics",
|
|
78
|
+
# Tracing
|
|
79
|
+
"SpanKind",
|
|
80
|
+
"TracingContext",
|
|
81
|
+
"get_tracer",
|
|
82
|
+
"trace_method",
|
|
83
|
+
"trace_async",
|
|
84
|
+
]
|