alma-memory 0.3.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. alma/__init__.py +99 -29
  2. alma/confidence/__init__.py +47 -0
  3. alma/confidence/engine.py +540 -0
  4. alma/confidence/types.py +351 -0
  5. alma/config/loader.py +3 -2
  6. alma/consolidation/__init__.py +23 -0
  7. alma/consolidation/engine.py +678 -0
  8. alma/consolidation/prompts.py +84 -0
  9. alma/core.py +15 -15
  10. alma/domains/__init__.py +6 -6
  11. alma/domains/factory.py +12 -9
  12. alma/domains/schemas.py +17 -3
  13. alma/domains/types.py +8 -4
  14. alma/events/__init__.py +75 -0
  15. alma/events/emitter.py +284 -0
  16. alma/events/storage_mixin.py +246 -0
  17. alma/events/types.py +126 -0
  18. alma/events/webhook.py +425 -0
  19. alma/exceptions.py +49 -0
  20. alma/extraction/__init__.py +31 -0
  21. alma/extraction/auto_learner.py +264 -0
  22. alma/extraction/extractor.py +420 -0
  23. alma/graph/__init__.py +81 -0
  24. alma/graph/backends/__init__.py +18 -0
  25. alma/graph/backends/memory.py +236 -0
  26. alma/graph/backends/neo4j.py +417 -0
  27. alma/graph/base.py +159 -0
  28. alma/graph/extraction.py +198 -0
  29. alma/graph/store.py +860 -0
  30. alma/harness/__init__.py +4 -4
  31. alma/harness/base.py +18 -9
  32. alma/harness/domains.py +27 -11
  33. alma/initializer/__init__.py +37 -0
  34. alma/initializer/initializer.py +418 -0
  35. alma/initializer/types.py +250 -0
  36. alma/integration/__init__.py +9 -9
  37. alma/integration/claude_agents.py +10 -10
  38. alma/integration/helena.py +32 -22
  39. alma/integration/victor.py +57 -33
  40. alma/learning/__init__.py +27 -27
  41. alma/learning/forgetting.py +198 -148
  42. alma/learning/heuristic_extractor.py +40 -24
  43. alma/learning/protocols.py +62 -14
  44. alma/learning/validation.py +7 -2
  45. alma/mcp/__init__.py +4 -4
  46. alma/mcp/__main__.py +2 -1
  47. alma/mcp/resources.py +17 -16
  48. alma/mcp/server.py +102 -44
  49. alma/mcp/tools.py +174 -37
  50. alma/progress/__init__.py +3 -3
  51. alma/progress/tracker.py +26 -20
  52. alma/progress/types.py +8 -12
  53. alma/py.typed +0 -0
  54. alma/retrieval/__init__.py +11 -11
  55. alma/retrieval/cache.py +20 -21
  56. alma/retrieval/embeddings.py +4 -4
  57. alma/retrieval/engine.py +114 -35
  58. alma/retrieval/scoring.py +73 -63
  59. alma/session/__init__.py +2 -2
  60. alma/session/manager.py +5 -5
  61. alma/session/types.py +5 -4
  62. alma/storage/__init__.py +41 -0
  63. alma/storage/azure_cosmos.py +107 -31
  64. alma/storage/base.py +157 -4
  65. alma/storage/chroma.py +1443 -0
  66. alma/storage/file_based.py +56 -20
  67. alma/storage/pinecone.py +1080 -0
  68. alma/storage/postgresql.py +1452 -0
  69. alma/storage/qdrant.py +1306 -0
  70. alma/storage/sqlite_local.py +376 -31
  71. alma/types.py +62 -14
  72. alma_memory-0.5.0.dist-info/METADATA +905 -0
  73. alma_memory-0.5.0.dist-info/RECORD +76 -0
  74. {alma_memory-0.3.0.dist-info → alma_memory-0.5.0.dist-info}/WHEEL +1 -1
  75. alma_memory-0.3.0.dist-info/METADATA +0 -438
  76. alma_memory-0.3.0.dist-info/RECORD +0 -46
  77. {alma_memory-0.3.0.dist-info → alma_memory-0.5.0.dist-info}/top_level.txt +0 -0
alma/mcp/tools.py CHANGED
@@ -5,11 +5,9 @@ Provides the tool functions that can be called via MCP protocol.
 Each tool corresponds to an ALMA operation.
 """
 
-import json
 import logging
-from typing import Dict, Any, Optional, List
 from datetime import datetime, timezone
-from dataclasses import asdict
+from typing import Any, Dict, Optional
 
 from alma import ALMA
 from alma.types import MemorySlice
@@ -32,47 +30,57 @@ def _serialize_memory_slice(memory_slice: MemorySlice) -> Dict[str, Any]:
     }
 
     for h in memory_slice.heuristics:
-        result["heuristics"].append({
-            "id": h.id,
-            "condition": h.condition,
-            "strategy": h.strategy,
-            "confidence": h.confidence,
-            "occurrence_count": h.occurrence_count,
-            "success_rate": h.success_rate,
-        })
+        result["heuristics"].append(
+            {
+                "id": h.id,
+                "condition": h.condition,
+                "strategy": h.strategy,
+                "confidence": h.confidence,
+                "occurrence_count": h.occurrence_count,
+                "success_rate": h.success_rate,
+            }
+        )
 
     for o in memory_slice.outcomes:
-        result["outcomes"].append({
-            "id": o.id,
-            "task_type": o.task_type,
-            "task_description": o.task_description,
-            "success": o.success,
-            "strategy_used": o.strategy_used,
-            "duration_ms": o.duration_ms,
-        })
+        result["outcomes"].append(
+            {
+                "id": o.id,
+                "task_type": o.task_type,
+                "task_description": o.task_description,
+                "success": o.success,
+                "strategy_used": o.strategy_used,
+                "duration_ms": o.duration_ms,
+            }
+        )
 
     for dk in memory_slice.domain_knowledge:
-        result["domain_knowledge"].append({
-            "id": dk.id,
-            "domain": dk.domain,
-            "fact": dk.fact,
-            "confidence": dk.confidence,
-        })
+        result["domain_knowledge"].append(
+            {
+                "id": dk.id,
+                "domain": dk.domain,
+                "fact": dk.fact,
+                "confidence": dk.confidence,
+            }
+        )
 
     for ap in memory_slice.anti_patterns:
-        result["anti_patterns"].append({
-            "id": ap.id,
-            "pattern": ap.pattern,
-            "why_bad": ap.why_bad,
-            "better_alternative": ap.better_alternative,
-        })
+        result["anti_patterns"].append(
+            {
+                "id": ap.id,
+                "pattern": ap.pattern,
+                "why_bad": ap.why_bad,
+                "better_alternative": ap.better_alternative,
+            }
+        )
 
     for p in memory_slice.preferences:
-        result["preferences"].append({
-            "id": p.id,
-            "category": p.category,
-            "preference": p.preference,
-        })
+        result["preferences"].append(
+            {
+                "id": p.id,
+                "category": p.category,
+                "preference": p.preference,
+            }
+        )
 
     return result
 
@@ -97,6 +105,12 @@ def alma_retrieve(
     Returns:
         Dict containing the memory slice with relevant memories
     """
+    # Input validation
+    if not task or not task.strip():
+        return {"success": False, "error": "task cannot be empty"}
+    if not agent or not agent.strip():
+        return {"success": False, "error": "agent cannot be empty"}
+
     try:
         memories = alma.retrieve(
             task=task,
@@ -147,6 +161,16 @@ def alma_learn(
     Returns:
         Dict with learning result
     """
+    # Input validation
+    if not agent or not agent.strip():
+        return {"success": False, "error": "agent cannot be empty"}
+    if not task or not task.strip():
+        return {"success": False, "error": "task cannot be empty"}
+    if not outcome or not outcome.strip():
+        return {"success": False, "error": "outcome cannot be empty"}
+    if not strategy_used or not strategy_used.strip():
+        return {"success": False, "error": "strategy_used cannot be empty"}
+
     try:
         result = alma.learn(
             agent=agent,
@@ -162,7 +186,9 @@ def alma_learn(
         return {
             "success": True,
             "learned": result,
-            "message": "Outcome recorded" if result else "Learning rejected (scope violation)",
+            "message": (
+                "Outcome recorded" if result else "Learning rejected (scope violation)"
+            ),
         }
 
     except Exception as e:
@@ -193,6 +219,14 @@ def alma_add_preference(
     Returns:
         Dict with the created preference
     """
+    # Input validation
+    if not user_id or not user_id.strip():
+        return {"success": False, "error": "user_id cannot be empty"}
+    if not category or not category.strip():
+        return {"success": False, "error": "category cannot be empty"}
+    if not preference or not preference.strip():
+        return {"success": False, "error": "preference cannot be empty"}
+
     try:
         pref = alma.add_user_preference(
             user_id=user_id,
@@ -240,6 +274,14 @@ def alma_add_knowledge(
     Returns:
         Dict with the created knowledge or rejection reason
     """
+    # Input validation
+    if not agent or not agent.strip():
+        return {"success": False, "error": "agent cannot be empty"}
+    if not domain or not domain.strip():
+        return {"success": False, "error": "domain cannot be empty"}
+    if not fact or not fact.strip():
+        return {"success": False, "error": "fact cannot be empty"}
+
     try:
         knowledge = alma.add_domain_knowledge(
             agent=agent,
@@ -372,3 +414,98 @@ def alma_health(alma: ALMA) -> Dict[str, Any]:
             "status": "unhealthy",
             "error": str(e),
         }
+
+
+async def alma_consolidate(
+    alma: ALMA,
+    agent: str,
+    memory_type: str = "heuristics",
+    similarity_threshold: float = 0.85,
+    dry_run: bool = True,
+) -> Dict[str, Any]:
+    """
+    Consolidate similar memories to reduce redundancy.
+
+    This is ALMA's implementation of Mem0's core innovation - LLM-powered
+    deduplication that merges similar memories intelligently.
+
+    Args:
+        alma: ALMA instance
+        agent: Agent whose memories to consolidate
+        memory_type: Type of memory to consolidate
+            ("heuristics", "outcomes", "domain_knowledge", "anti_patterns")
+        similarity_threshold: Minimum cosine similarity to group (0.0 to 1.0)
+            Higher values are more conservative (fewer merges)
+        dry_run: If True, report what would be merged without actually modifying storage
+            Recommended for first run to preview changes
+
+    Returns:
+        Dict with consolidation results including:
+        - merged_count: Number of memories merged
+        - groups_found: Number of similar memory groups identified
+        - memories_processed: Total memories analyzed
+        - merge_details: List of merge operations (or planned operations if dry_run)
+        - errors: Any errors encountered
+    """
+    # Input validation
+    if not agent or not agent.strip():
+        return {"success": False, "error": "agent cannot be empty"}
+
+    valid_types = ["heuristics", "outcomes", "domain_knowledge", "anti_patterns"]
+    if memory_type not in valid_types:
+        return {
+            "success": False,
+            "error": f"memory_type must be one of: {', '.join(valid_types)}",
+        }
+
+    if not 0.0 <= similarity_threshold <= 1.0:
+        return {
+            "success": False,
+            "error": "similarity_threshold must be between 0.0 and 1.0",
+        }
+
+    try:
+        from alma.consolidation import ConsolidationEngine
+
+        # Create consolidation engine
+        engine = ConsolidationEngine(
+            storage=alma.storage,
+            embedder=None,  # Will use default LocalEmbedder
+            llm_client=None,  # LLM merging disabled by default
+        )
+
+        # Run consolidation
+        result = await engine.consolidate(
+            agent=agent,
+            project_id=alma.project_id,
+            memory_type=memory_type,
+            similarity_threshold=similarity_threshold,
+            use_llm=False,  # LLM disabled - uses highest confidence merge
+            dry_run=dry_run,
+        )
+
+        # Invalidate cache after consolidation (if not dry run)
+        if not dry_run and result.merged_count > 0:
+            alma.retrieval.invalidate_cache(agent=agent, project_id=alma.project_id)
+
+        return {
+            "success": result.success,
+            "dry_run": dry_run,
+            "merged_count": result.merged_count,
+            "groups_found": result.groups_found,
+            "memories_processed": result.memories_processed,
+            "merge_details": result.merge_details,
+            "errors": result.errors,
+            "message": (
+                f"{'Would merge' if dry_run else 'Merged'} {result.merged_count} memories "
+                f"from {result.groups_found} similar groups "
+                f"(processed {result.memories_processed} total)"
+            ),
+        }
+
+    except Exception as e:
+        logger.exception(f"Error in alma_consolidate: {e}")
+        return {
+            "success": False,
+            "error": str(e),
+        }
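Note: the new alma_consolidate tool is a coroutine and defaults to dry_run=True. A minimal sketch of the preview-then-apply workflow its docstring recommends; the helper name and the way the ALMA instance is obtained are illustrative, not part of the package:

    import asyncio

    from alma import ALMA
    from alma.mcp.tools import alma_consolidate

    async def preview_then_apply(alma: ALMA, agent: str) -> None:
        # Dry run (the default): reports planned merges without touching storage.
        preview = await alma_consolidate(alma, agent=agent, memory_type="heuristics")
        print(preview.get("message"))

        # Apply only if the preview found mergeable groups.
        if preview.get("success") and preview.get("merged_count", 0) > 0:
            await alma_consolidate(
                alma, agent=agent, memory_type="heuristics", dry_run=False
            )

    # asyncio.run(preview_then_apply(alma, "my-agent"))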
alma/progress/__init__.py CHANGED
@@ -4,13 +4,13 @@ ALMA Progress Tracking Module.
 Track work items, progress, and suggest next actions.
 """
 
+from alma.progress.tracker import ProgressTracker
 from alma.progress.types import (
-    WorkItem,
-    WorkItemStatus,
     ProgressLog,
     ProgressSummary,
+    WorkItem,
+    WorkItemStatus,
 )
-from alma.progress.tracker import ProgressTracker
 
 __all__ = [
     "WorkItem",
alma/progress/tracker.py CHANGED
@@ -5,27 +5,25 @@ Manages work items and provides progress tracking functionality.
 """
 
 import logging
-import uuid
 from datetime import datetime, timezone
-from typing import Optional, List, Dict, Any, Literal
+from typing import Any, Dict, List, Literal, Optional
 
 from alma.progress.types import (
-    WorkItem,
-    WorkItemStatus,
     ProgressLog,
     ProgressSummary,
+    WorkItem,
+    WorkItemStatus,
 )
 from alma.storage.base import StorageBackend
 
-
 logger = logging.getLogger(__name__)
 
 
 SelectionStrategy = Literal[
-    "priority",         # Highest priority first
+    "priority",  # Highest priority first
     "blocked_unblock",  # Items that unblock others
-    "quick_win",        # Smallest/easiest first
-    "fifo",             # First in, first out
+    "quick_win",  # Smallest/easiest first
+    "fifo",  # First in, first out
 ]
 
 
@@ -176,12 +174,14 @@ class ProgressTracker:
         if notes:
             if "status_notes" not in item.metadata:
                 item.metadata["status_notes"] = []
-            item.metadata["status_notes"].append({
-                "from": old_status,
-                "to": status,
-                "notes": notes,
-                "timestamp": datetime.now(timezone.utc).isoformat(),
-            })
+            item.metadata["status_notes"].append(
+                {
+                    "from": old_status,
+                    "to": status,
+                    "notes": notes,
+                    "timestamp": datetime.now(timezone.utc).isoformat(),
+                }
+            )
 
         logger.info(f"Status updated: {item_id} {old_status} -> {status}")
         return item
@@ -262,7 +262,8 @@ class ProgressTracker:
     ) -> List[WorkItem]:
         """Get items that can be worked on (not blocked, not done)."""
         return [
-            item for item in self._work_items.values()
+            item
+            for item in self._work_items.values()
             if item.is_actionable()
            and (agent is None or item.agent == agent or item.agent is None)
         ]
@@ -312,7 +313,8 @@ class ProgressTracker:
         unblock_counts = {}
         for item in actionable:
             count = sum(
-                1 for other in self._work_items.values()
+                1
+                for other in self._work_items.values()
                 if item.id in other.blocked_by
             )
             unblock_counts[item.id] = count
@@ -439,9 +441,9 @@ class ProgressTracker:
         logs = self._progress_logs
 
         if agent:
-            logs = [l for l in logs if l.agent == agent]
+            logs = [log for log in logs if log.agent == agent]
         if session_id:
-            logs = [l for l in logs if l.session_id == session_id]
+            logs = [log for log in logs if log.session_id == session_id]
 
         # Sort by created_at descending and limit
         logs.sort(key=lambda x: x.created_at, reverse=True)
@@ -530,8 +532,12 @@ class ProgressTracker:
                 "attempt_count": item.attempt_count,
                 "created_at": item.created_at.isoformat(),
                 "updated_at": item.updated_at.isoformat(),
-                "started_at": item.started_at.isoformat() if item.started_at else None,
-                "completed_at": item.completed_at.isoformat() if item.completed_at else None,
+                "started_at": (
+                    item.started_at.isoformat() if item.started_at else None
+                ),
+                "completed_at": (
+                    item.completed_at.isoformat() if item.completed_at else None
+                ),
                 "metadata": item.metadata,
             }
             for item in self._work_items.values()
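Note: SelectionStrategy is a plain Literal; the dispatch code is not shown in this diff. A sketch of what the "fifo" policy implies, using only members visible above (is_actionable() and created_at); the helper is illustrative, not part of the package:

    from typing import List, Optional

    from alma.progress.types import WorkItem

    def pick_fifo(items: List[WorkItem]) -> Optional[WorkItem]:
        # "fifo": oldest actionable item first; blocked or settled items are skipped.
        actionable = [item for item in items if item.is_actionable()]
        return min(actionable, key=lambda item: item.created_at, default=None)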
alma/progress/types.py CHANGED
@@ -4,19 +4,18 @@ Progress Tracking Types.
 Data models for tracking work items and progress.
 """
 
+import uuid
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
-from typing import Optional, List, Dict, Any, Literal
-import uuid
-
+from typing import Any, Dict, List, Literal, Optional
 
 WorkItemStatus = Literal[
-    "pending",      # Not started
+    "pending",  # Not started
     "in_progress",  # Currently being worked on
-    "blocked",      # Waiting on something
-    "review",       # Completed, awaiting review
-    "done",         # Completed and verified
-    "failed",       # Could not complete
+    "blocked",  # Waiting on something
+    "review",  # Completed, awaiting review
+    "done",  # Completed and verified
+    "failed",  # Could not complete
 ]
 
 
@@ -124,10 +123,7 @@ class WorkItem:
 
     def is_actionable(self) -> bool:
         """Check if work item can be worked on."""
-        return (
-            self.status in ("pending", "in_progress")
-            and len(self.blocked_by) == 0
-        )
+        return self.status in ("pending", "in_progress") and len(self.blocked_by) == 0
 
 
 @dataclass
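Note: is_actionable() now reads as a single expression. Restated outside the class to make the status/blocked_by interplay explicit (a standalone mirror for illustration, not package code):

    def is_actionable(status: str, blocked_by: list) -> bool:
        # Mirrors WorkItem.is_actionable() above.
        return status in ("pending", "in_progress") and len(blocked_by) == 0

    assert is_actionable("pending", [])
    assert not is_actionable("review", [])           # review/done/failed are settled
    assert not is_actionable("in_progress", ["id"])  # non-empty blocked_by always wins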
alma/py.typed ADDED
File without changes
alma/retrieval/__init__.py CHANGED
@@ -4,29 +4,29 @@ ALMA Retrieval Engine.
 Provides semantic search, scoring, and caching for memory retrieval.
 """
 
-from alma.retrieval.engine import RetrievalEngine
-from alma.retrieval.scoring import (
-    MemoryScorer,
-    ScoringWeights,
-    ScoredItem,
-    compute_composite_score,
-)
 from alma.retrieval.cache import (
     CacheBackend,
-    RetrievalCache,
-    RedisCache,
-    NullCache,
     CacheEntry,
     CacheStats,
+    NullCache,
     PerformanceMetrics,
+    RedisCache,
+    RetrievalCache,
     create_cache,
 )
 from alma.retrieval.embeddings import (
+    AzureEmbedder,
     EmbeddingProvider,
     LocalEmbedder,
-    AzureEmbedder,
     MockEmbedder,
 )
+from alma.retrieval.engine import RetrievalEngine
+from alma.retrieval.scoring import (
+    MemoryScorer,
+    ScoredItem,
+    ScoringWeights,
+    compute_composite_score,
+)
 
 __all__ = [
     # Engine
alma/retrieval/cache.py CHANGED
@@ -5,16 +5,15 @@ Multi-backend caching layer for retrieval results with TTL-based expiration.
 Supports in-memory and Redis backends with performance monitoring.
 """
 
-import time
-import json
 import hashlib
-import threading
+import json
 import logging
+import threading
+import time
 from abc import ABC, abstractmethod
-from typing import Optional, Dict, Any, List, Callable, Tuple
-from dataclasses import dataclass, field, asdict
+from dataclasses import dataclass, field
 from datetime import datetime, timezone
-from contextlib import contextmanager
+from typing import Any, Callable, Dict, List, Optional
 
 from alma.types import MemorySlice
 
@@ -27,6 +26,7 @@ logger = logging.getLogger(__name__)
 @dataclass
 class CacheEntry:
     """A cached retrieval result with metadata."""
+
     result: MemorySlice
     created_at: float  # time.time() timestamp
     expires_at: float
@@ -41,6 +41,7 @@ class CacheEntry:
 @dataclass
 class CacheStats:
     """Statistics about cache performance."""
+
     hits: int = 0
     misses: int = 0
     evictions: int = 0
@@ -81,6 +82,7 @@ class CacheStats:
 @dataclass
 class PerformanceMetrics:
     """Tracks timing metrics for performance analysis."""
+
     get_times: List[float] = field(default_factory=list)
     set_times: List[float] = field(default_factory=list)
     max_samples: int = 1000
@@ -89,13 +91,13 @@ class PerformanceMetrics:
         """Record a get operation time."""
         self.get_times.append(duration_ms)
         if len(self.get_times) > self.max_samples:
-            self.get_times = self.get_times[-self.max_samples:]
+            self.get_times = self.get_times[-self.max_samples :]
 
     def record_set(self, duration_ms: float):
         """Record a set operation time."""
         self.set_times.append(duration_ms)
         if len(self.set_times) > self.max_samples:
-            self.set_times = self.set_times[-self.max_samples:]
+            self.set_times = self.set_times[-self.max_samples :]
 
     def get_percentile(self, times: List[float], percentile: float) -> float:
         """Calculate percentile from timing data."""
@@ -430,9 +432,7 @@ class RetrievalCache(CacheBackend):
         """Remove all expired entries."""
         now = time.time()
         expired = [
-            (key, entry)
-            for key, entry in self._cache.items()
-            if now > entry.expires_at
+            (key, entry) for key, entry in self._cache.items() if now > entry.expires_at
         ]
 
         for key, entry in expired:
@@ -587,13 +587,12 @@ class RedisCache(CacheBackend):
             # Test connection
             self._redis.ping()
             logger.info(f"Connected to Redis at {host}:{port}")
-        except ImportError:
+        except ImportError as err:
             raise ImportError(
-                "redis package required for RedisCache. "
-                "Install with: pip install redis"
-            )
+                "redis package required for RedisCache. Install with: pip install redis"
+            ) from err
         except Exception as e:
-            raise ConnectionError(f"Failed to connect to Redis: {e}")
+            raise ConnectionError(f"Failed to connect to Redis: {e}") from e
 
     def set_hooks(
         self,
@@ -641,9 +640,9 @@ class RedisCache(CacheBackend):
                     "confidence": h.confidence,
                     "occurrence_count": h.occurrence_count,
                     "success_count": h.success_count,
-                    "last_validated": h.last_validated.isoformat()
-                    if h.last_validated
-                    else None,
+                    "last_validated": (
+                        h.last_validated.isoformat() if h.last_validated else None
+                    ),
                     "created_at": h.created_at.isoformat() if h.created_at else None,
                 }
                 for h in result.heuristics
@@ -703,11 +702,11 @@ class RedisCache(CacheBackend):
     def _deserialize_result(self, data: bytes) -> MemorySlice:
         """Deserialize bytes to MemorySlice."""
         from alma.types import (
+            AntiPattern,
+            DomainKnowledge,
             Heuristic,
             Outcome,
             UserPreference,
-            DomainKnowledge,
-            AntiPattern,
         )
 
         obj = json.loads(data.decode("utf-8"))
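Note: after this change RedisCache chains its failures (raise ... from err), so callers can distinguish a missing redis package from an unreachable server. A usage sketch; the constructor arguments are assumptions, since the diff shows only the class names and the connection test:

    from alma.retrieval.cache import RedisCache, RetrievalCache

    cache = RetrievalCache()  # in-memory TTL cache as the default

    try:
        cache = RedisCache(host="localhost", port=6379)  # pings Redis on construction
    except ImportError:
        pass  # redis package not installed: pip install redis
    except ConnectionError:
        pass  # Redis unreachable; keep the in-memory fallback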
alma/retrieval/embeddings.py CHANGED
@@ -59,11 +59,11 @@ class LocalEmbedder(EmbeddingProvider):
             self._model = SentenceTransformer(self.model_name)
             self._dimension = self._model.get_sentence_embedding_dimension()
             logger.info(f"Model loaded, dimension: {self._dimension}")
-        except ImportError:
+        except ImportError as err:
             raise ImportError(
                 "sentence-transformers is required for local embeddings. "
                 "Install with: pip install sentence-transformers"
-            )
+            ) from err
 
     def encode(self, text: str) -> List[float]:
         """Generate embedding for text."""
@@ -134,11 +134,11 @@ class AzureEmbedder(EmbeddingProvider):
                 api_key=self.api_key,
                 api_version=self.api_version,
             )
-        except ImportError:
+        except ImportError as err:
             raise ImportError(
                 "openai is required for Azure embeddings. "
                 "Install with: pip install openai"
-            )
+            ) from err
         return self._client
 
     def encode(self, text: str) -> List[float]:
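Note: the same from-err chaining now applies to the embedders. A sketch of guarding the optional dependency; the no-argument constructor is an assumption, and whether LocalEmbedder loads its model eagerly or on first encode() is not visible in this diff:

    from alma.retrieval.embeddings import LocalEmbedder

    embedder = LocalEmbedder()

    try:
        vector = embedder.encode("deploy the staging cluster")
        print(len(vector))  # model dimension, e.g. 384 for MiniLM-class models
    except ImportError:
        # Raised (with the original error chained) when sentence-transformers
        # is missing: pip install sentence-transformers
        pass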