alma-memory 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. alma/__init__.py +121 -45
  2. alma/confidence/__init__.py +1 -1
  3. alma/confidence/engine.py +92 -58
  4. alma/confidence/types.py +34 -14
  5. alma/config/loader.py +3 -2
  6. alma/consolidation/__init__.py +23 -0
  7. alma/consolidation/engine.py +678 -0
  8. alma/consolidation/prompts.py +84 -0
  9. alma/core.py +136 -28
  10. alma/domains/__init__.py +6 -6
  11. alma/domains/factory.py +12 -9
  12. alma/domains/schemas.py +17 -3
  13. alma/domains/types.py +8 -4
  14. alma/events/__init__.py +75 -0
  15. alma/events/emitter.py +284 -0
  16. alma/events/storage_mixin.py +246 -0
  17. alma/events/types.py +126 -0
  18. alma/events/webhook.py +425 -0
  19. alma/exceptions.py +49 -0
  20. alma/extraction/__init__.py +31 -0
  21. alma/extraction/auto_learner.py +265 -0
  22. alma/extraction/extractor.py +420 -0
  23. alma/graph/__init__.py +106 -0
  24. alma/graph/backends/__init__.py +32 -0
  25. alma/graph/backends/kuzu.py +624 -0
  26. alma/graph/backends/memgraph.py +432 -0
  27. alma/graph/backends/memory.py +236 -0
  28. alma/graph/backends/neo4j.py +417 -0
  29. alma/graph/base.py +159 -0
  30. alma/graph/extraction.py +198 -0
  31. alma/graph/store.py +860 -0
  32. alma/harness/__init__.py +4 -4
  33. alma/harness/base.py +18 -9
  34. alma/harness/domains.py +27 -11
  35. alma/initializer/__init__.py +1 -1
  36. alma/initializer/initializer.py +51 -43
  37. alma/initializer/types.py +25 -17
  38. alma/integration/__init__.py +9 -9
  39. alma/integration/claude_agents.py +32 -20
  40. alma/integration/helena.py +32 -22
  41. alma/integration/victor.py +57 -33
  42. alma/learning/__init__.py +27 -27
  43. alma/learning/forgetting.py +198 -148
  44. alma/learning/heuristic_extractor.py +40 -24
  45. alma/learning/protocols.py +65 -17
  46. alma/learning/validation.py +7 -2
  47. alma/mcp/__init__.py +4 -4
  48. alma/mcp/__main__.py +2 -1
  49. alma/mcp/resources.py +17 -16
  50. alma/mcp/server.py +102 -44
  51. alma/mcp/tools.py +180 -45
  52. alma/observability/__init__.py +84 -0
  53. alma/observability/config.py +302 -0
  54. alma/observability/logging.py +424 -0
  55. alma/observability/metrics.py +583 -0
  56. alma/observability/tracing.py +440 -0
  57. alma/progress/__init__.py +3 -3
  58. alma/progress/tracker.py +26 -20
  59. alma/progress/types.py +8 -12
  60. alma/py.typed +0 -0
  61. alma/retrieval/__init__.py +11 -11
  62. alma/retrieval/cache.py +20 -21
  63. alma/retrieval/embeddings.py +4 -4
  64. alma/retrieval/engine.py +179 -39
  65. alma/retrieval/scoring.py +73 -63
  66. alma/session/__init__.py +2 -2
  67. alma/session/manager.py +5 -5
  68. alma/session/types.py +5 -4
  69. alma/storage/__init__.py +70 -0
  70. alma/storage/azure_cosmos.py +414 -133
  71. alma/storage/base.py +215 -4
  72. alma/storage/chroma.py +1443 -0
  73. alma/storage/constants.py +103 -0
  74. alma/storage/file_based.py +59 -28
  75. alma/storage/migrations/__init__.py +21 -0
  76. alma/storage/migrations/base.py +321 -0
  77. alma/storage/migrations/runner.py +323 -0
  78. alma/storage/migrations/version_stores.py +337 -0
  79. alma/storage/migrations/versions/__init__.py +11 -0
  80. alma/storage/migrations/versions/v1_0_0.py +373 -0
  81. alma/storage/pinecone.py +1080 -0
  82. alma/storage/postgresql.py +1559 -0
  83. alma/storage/qdrant.py +1306 -0
  84. alma/storage/sqlite_local.py +504 -60
  85. alma/testing/__init__.py +46 -0
  86. alma/testing/factories.py +301 -0
  87. alma/testing/mocks.py +389 -0
  88. alma/types.py +62 -14
  89. alma_memory-0.5.1.dist-info/METADATA +939 -0
  90. alma_memory-0.5.1.dist-info/RECORD +93 -0
  91. {alma_memory-0.4.0.dist-info → alma_memory-0.5.1.dist-info}/WHEEL +1 -1
  92. alma_memory-0.4.0.dist-info/METADATA +0 -488
  93. alma_memory-0.4.0.dist-info/RECORD +0 -52
  94. {alma_memory-0.4.0.dist-info → alma_memory-0.5.1.dist-info}/top_level.txt +0 -0
alma/retrieval/cache.py CHANGED
@@ -5,16 +5,15 @@ Multi-backend caching layer for retrieval results with TTL-based expiration.
  Supports in-memory and Redis backends with performance monitoring.
  """
 
- import time
- import json
  import hashlib
- import threading
+ import json
  import logging
+ import threading
+ import time
  from abc import ABC, abstractmethod
- from typing import Optional, Dict, Any, List, Callable, Tuple
- from dataclasses import dataclass, field, asdict
+ from dataclasses import dataclass, field
  from datetime import datetime, timezone
- from contextlib import contextmanager
+ from typing import Any, Callable, Dict, List, Optional
 
  from alma.types import MemorySlice
 
@@ -27,6 +26,7 @@ logger = logging.getLogger(__name__)
  @dataclass
  class CacheEntry:
      """A cached retrieval result with metadata."""
+
      result: MemorySlice
      created_at: float  # time.time() timestamp
      expires_at: float
@@ -41,6 +41,7 @@
  @dataclass
  class CacheStats:
      """Statistics about cache performance."""
+
      hits: int = 0
      misses: int = 0
      evictions: int = 0
@@ -81,6 +82,7 @@
  @dataclass
  class PerformanceMetrics:
      """Tracks timing metrics for performance analysis."""
+
      get_times: List[float] = field(default_factory=list)
      set_times: List[float] = field(default_factory=list)
      max_samples: int = 1000
@@ -89,13 +91,13 @@
          """Record a get operation time."""
          self.get_times.append(duration_ms)
          if len(self.get_times) > self.max_samples:
-             self.get_times = self.get_times[-self.max_samples:]
+             self.get_times = self.get_times[-self.max_samples :]
 
      def record_set(self, duration_ms: float):
          """Record a set operation time."""
          self.set_times.append(duration_ms)
          if len(self.set_times) > self.max_samples:
-             self.set_times = self.set_times[-self.max_samples:]
+             self.set_times = self.set_times[-self.max_samples :]
 
      def get_percentile(self, times: List[float], percentile: float) -> float:
          """Calculate percentile from timing data."""
@@ -430,9 +432,7 @@ class RetrievalCache(CacheBackend):
          """Remove all expired entries."""
          now = time.time()
          expired = [
-             (key, entry)
-             for key, entry in self._cache.items()
-             if now > entry.expires_at
+             (key, entry) for key, entry in self._cache.items() if now > entry.expires_at
          ]
 
          for key, entry in expired:
@@ -587,13 +587,12 @@ class RedisCache(CacheBackend):
              # Test connection
              self._redis.ping()
              logger.info(f"Connected to Redis at {host}:{port}")
-         except ImportError:
+         except ImportError as err:
              raise ImportError(
-                 "redis package required for RedisCache. "
-                 "Install with: pip install redis"
-             )
+                 "redis package required for RedisCache. Install with: pip install redis"
+             ) from err
          except Exception as e:
-             raise ConnectionError(f"Failed to connect to Redis: {e}")
+             raise ConnectionError(f"Failed to connect to Redis: {e}") from e
 
      def set_hooks(
          self,
@@ -641,9 +640,9 @@
                      "confidence": h.confidence,
                      "occurrence_count": h.occurrence_count,
                      "success_count": h.success_count,
-                     "last_validated": h.last_validated.isoformat()
-                     if h.last_validated
-                     else None,
+                     "last_validated": (
+                         h.last_validated.isoformat() if h.last_validated else None
+                     ),
                      "created_at": h.created_at.isoformat() if h.created_at else None,
                  }
                  for h in result.heuristics
@@ -703,11 +702,11 @@
      def _deserialize_result(self, data: bytes) -> MemorySlice:
          """Deserialize bytes to MemorySlice."""
          from alma.types import (
+             AntiPattern,
+             DomainKnowledge,
              Heuristic,
              Outcome,
              UserPreference,
-             DomainKnowledge,
-             AntiPattern,
          )
 
          obj = json.loads(data.decode("utf-8"))
alma/retrieval/embeddings.py CHANGED
@@ -59,11 +59,11 @@ class LocalEmbedder(EmbeddingProvider):
              self._model = SentenceTransformer(self.model_name)
              self._dimension = self._model.get_sentence_embedding_dimension()
              logger.info(f"Model loaded, dimension: {self._dimension}")
-         except ImportError:
+         except ImportError as err:
              raise ImportError(
                  "sentence-transformers is required for local embeddings. "
                  "Install with: pip install sentence-transformers"
-             )
+             ) from err
 
      def encode(self, text: str) -> List[float]:
          """Generate embedding for text."""
@@ -134,11 +134,11 @@ class AzureEmbedder(EmbeddingProvider):
                  api_key=self.api_key,
                  api_version=self.api_version,
              )
-         except ImportError:
+         except ImportError as err:
              raise ImportError(
                  "openai is required for Azure embeddings. "
                  "Install with: pip install openai"
-             )
+             ) from err
          return self._client
 
      def encode(self, text: str) -> List[float]:
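Review note: the recurring edit in this file, and in the RedisCache hunk of cache.py above, replaces a bare re-raise inside `except ImportError` with `raise ... from err`. Chaining sets `__cause__` on the new exception, so tracebacks report the original import failure as the direct cause of the install hint instead of the misleading "During handling of the above exception, another exception occurred" context. A minimal sketch of the pattern, using a made-up package name rather than any real alma-memory dependency:

    # Sketch only; "fastvec" is a hypothetical optional dependency.
    try:
        import fastvec  # noqa: F401
    except ImportError as err:
        # "from err" records the original ImportError as __cause__, so the
        # traceback shows the root failure plus the actionable hint below.
        raise ImportError(
            "fastvec is required for this feature. Install with: pip install fastvec"
        ) from err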
alma/retrieval/engine.py CHANGED
@@ -4,16 +4,21 @@ ALMA Retrieval Engine.
  Handles semantic search and memory retrieval with scoring and caching.
  """
 
- import time
  import logging
- from typing import Optional, List, Dict, Any
+ import time
+ from typing import Any, Dict, List, Optional
 
- from alma.types import MemorySlice, MemoryScope
+ from alma.observability.logging import get_logger
+ from alma.observability.metrics import get_metrics
+ from alma.observability.tracing import get_tracer
+ from alma.retrieval.cache import NullCache, RetrievalCache
+ from alma.retrieval.scoring import MemoryScorer, ScoredItem, ScoringWeights
  from alma.storage.base import StorageBackend
- from alma.retrieval.scoring import MemoryScorer, ScoringWeights, ScoredItem
- from alma.retrieval.cache import RetrievalCache, NullCache
+ from alma.types import MemoryScope, MemorySlice
 
  logger = logging.getLogger(__name__)
+ structured_logger = get_logger(__name__)
+ tracer = get_tracer(__name__)
 
 
  class RetrievalEngine:
@@ -81,18 +86,24 @@
          top_k: int = 5,
          scope: Optional[MemoryScope] = None,
          bypass_cache: bool = False,
+         include_shared: bool = True,
      ) -> MemorySlice:
          """
          Retrieve relevant memories for a task.
 
+         Supports multi-agent memory sharing: if a scope is provided with
+         inherit_from agents, memories from those agents will also be included.
+         Shared memories have their origin tracked in the metadata['shared_from'] field.
+
          Args:
              query: Task description to find relevant memories for
              agent: Agent requesting memories
              project_id: Project context
              user_id: Optional user for preference retrieval
              top_k: Max items per memory type
-             scope: Agent's learning scope for filtering
+             scope: Agent's learning scope for filtering (enables multi-agent sharing)
              bypass_cache: Skip cache lookup/storage
+             include_shared: If True and scope has inherit_from, include shared memories
 
          Returns:
              MemorySlice with relevant memories, scored and ranked
@@ -110,36 +121,79 @@
          # Generate embedding for query
          query_embedding = self._get_embedding(query)
 
-         # Retrieve raw items from storage (with vector search)
-         raw_heuristics = self.storage.get_heuristics(
-             project_id=project_id,
-             agent=agent,
-             embedding=query_embedding,
-             top_k=top_k * 2,  # Get extra for scoring/filtering
-             min_confidence=0.0,  # Let scorer handle filtering
-         )
-
-         raw_outcomes = self.storage.get_outcomes(
-             project_id=project_id,
-             agent=agent,
-             embedding=query_embedding,
-             top_k=top_k * 2,
-             success_only=False,
-         )
+         # Determine which agents to query based on scope
+         agents_to_query = [agent]
+         if include_shared and scope and scope.inherit_from:
+             agents_to_query = scope.get_readable_agents()
+             logger.debug(
+                 f"Multi-agent retrieval for {agent}: querying {agents_to_query}"
+             )
 
-         raw_domain_knowledge = self.storage.get_domain_knowledge(
-             project_id=project_id,
-             agent=agent,
-             embedding=query_embedding,
-             top_k=top_k * 2,
-         )
+         # Retrieve raw items from storage (with vector search)
+         if len(agents_to_query) > 1:
+             # Use multi-agent query methods
+             raw_heuristics = self.storage.get_heuristics_for_agents(
+                 project_id=project_id,
+                 agents=agents_to_query,
+                 embedding=query_embedding,
+                 top_k=top_k * 2,
+                 min_confidence=0.0,
+             )
+             raw_outcomes = self.storage.get_outcomes_for_agents(
+                 project_id=project_id,
+                 agents=agents_to_query,
+                 embedding=query_embedding,
+                 top_k=top_k * 2,
+                 success_only=False,
+             )
+             raw_domain_knowledge = self.storage.get_domain_knowledge_for_agents(
+                 project_id=project_id,
+                 agents=agents_to_query,
+                 embedding=query_embedding,
+                 top_k=top_k * 2,
+             )
+             raw_anti_patterns = self.storage.get_anti_patterns_for_agents(
+                 project_id=project_id,
+                 agents=agents_to_query,
+                 embedding=query_embedding,
+                 top_k=top_k * 2,
+             )
 
-         raw_anti_patterns = self.storage.get_anti_patterns(
-             project_id=project_id,
-             agent=agent,
-             embedding=query_embedding,
-             top_k=top_k * 2,
-         )
+             # Mark shared memories with origin tracking
+             raw_heuristics = self._mark_shared_memories(raw_heuristics, agent)
+             raw_outcomes = self._mark_shared_memories(raw_outcomes, agent)
+             raw_domain_knowledge = self._mark_shared_memories(
+                 raw_domain_knowledge, agent
+             )
+             raw_anti_patterns = self._mark_shared_memories(raw_anti_patterns, agent)
+         else:
+             # Single agent query (original behavior)
+             raw_heuristics = self.storage.get_heuristics(
+                 project_id=project_id,
+                 agent=agent,
+                 embedding=query_embedding,
+                 top_k=top_k * 2,
+                 min_confidence=0.0,
+             )
+             raw_outcomes = self.storage.get_outcomes(
+                 project_id=project_id,
+                 agent=agent,
+                 embedding=query_embedding,
+                 top_k=top_k * 2,
+                 success_only=False,
+             )
+             raw_domain_knowledge = self.storage.get_domain_knowledge(
+                 project_id=project_id,
+                 agent=agent,
+                 embedding=query_embedding,
+                 top_k=top_k * 2,
+             )
+             raw_anti_patterns = self.storage.get_anti_patterns(
+                 project_id=project_id,
+                 agent=agent,
+                 embedding=query_embedding,
+                 top_k=top_k * 2,
+             )
 
          # Score and rank each type
          scored_heuristics = self.scorer.score_heuristics(raw_heuristics)
@@ -182,6 +236,31 @@
 
          return result
 
+     def _mark_shared_memories(
+         self,
+         memories: List[Any],
+         requesting_agent: str,
+     ) -> List[Any]:
+         """
+         Mark memories that came from other agents with their origin.
+
+         Adds 'shared_from' to metadata for memories not owned by requesting_agent.
+         This maintains write isolation - only the owning agent can modify their memories.
+
+         Args:
+             memories: List of memory objects (Heuristic, Outcome, etc.)
+             requesting_agent: The agent that requested the memories
+
+         Returns:
+             Same memories with shared_from metadata added where applicable
+         """
+         for memory in memories:
+             if hasattr(memory, "agent") and memory.agent != requesting_agent:
+                 if not hasattr(memory, "metadata") or memory.metadata is None:
+                     memory.metadata = {}
+                 memory.metadata["shared_from"] = memory.agent
+         return memories
+
      def _extract_top_k(
          self,
          scored_items: List[ScoredItem],
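Review note: the two hunks above carry the headline 0.5.1 feature, read-side memory sharing across agents. A rough usage sketch follows; the `MemoryScope` constructor arguments and engine wiring are assumptions read off this diff (`inherit_from`, `get_readable_agents()`, `metadata["shared_from"]`), not a documented API:

    # Hypothetical wiring; names are taken from the diff, defaults guessed.
    scope = MemoryScope(agent="reviewer", inherit_from=["planner", "coder"])

    memories = engine.retrieve(
        query="flaky integration tests",
        agent="reviewer",
        project_id="demo-project",
        scope=scope,          # enables the multi-agent fan-out branch
        include_shared=True,  # new flag; defaults to True
    )

    # Borrowed memories carry their origin; writes stay with the owner.
    for h in memories.heuristics:
        origin = (h.metadata or {}).get("shared_from")
        if origin:
            print(f"heuristic inherited from {origin}")

Sharing stays read-only by construction: `_mark_shared_memories` only annotates metadata, and nothing in the fan-out path writes back to another agent's records.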
@@ -211,19 +290,78 @@
          if self._embedder is None:
              self._embedder = self._init_embedder()
 
-         return self._embedder.encode(text)
+         start_time = time.time()
+         embedding = self._embedder.encode(text)
+         duration_ms = (time.time() - start_time) * 1000
+
+         # Record embedding generation metrics
+         metrics = get_metrics()
+         metrics.record_embedding_latency(
+             duration_ms=duration_ms,
+             provider=self.embedding_provider,
+             batch_size=1,
+         )
+
+         return embedding
 
      def _init_embedder(self):
          """Initialize the embedding model based on provider config."""
          if self.embedding_provider == "azure":
              from alma.retrieval.embeddings import AzureEmbedder
-             return AzureEmbedder()
+
+             embedder = AzureEmbedder()
          elif self.embedding_provider == "mock":
              from alma.retrieval.embeddings import MockEmbedder
-             return MockEmbedder()
+
+             embedder = MockEmbedder()
          else:
              from alma.retrieval.embeddings import LocalEmbedder
-             return LocalEmbedder()
+
+             embedder = LocalEmbedder()
+
+         # Validate embedding dimension matches storage configuration
+         self._validate_embedding_dimension(embedder)
+         return embedder
+
+     def _validate_embedding_dimension(self, embedder) -> None:
+         """
+         Validate that embedding provider dimension matches storage configuration.
+
+         Raises:
+             ValueError: If dimensions don't match
+         """
+         provider_dim = embedder.dimension
+
+         # Check if storage has embedding_dim attribute
+         storage_dim = getattr(self.storage, "embedding_dim", None)
+         if storage_dim is None:
+             logger.debug(
+                 "Storage backend doesn't specify embedding_dim, skipping validation"
+             )
+             return
+
+         # Skip validation if storage_dim is not an integer (e.g., mock objects)
+         if not isinstance(storage_dim, int):
+             logger.debug(
+                 f"Storage embedding_dim is not an integer ({type(storage_dim)}), "
+                 "skipping validation"
+             )
+             return
+
+         if provider_dim != storage_dim:
+             raise ValueError(
+                 f"Embedding dimension mismatch: provider '{self.embedding_provider}' "
+                 f"outputs {provider_dim} dimensions, but storage is configured for "
+                 f"{storage_dim} dimensions. Update your config's embedding_dim to "
+                 f"match the provider, or use a different embedding provider.\n"
+                 f" - local (all-MiniLM-L6-v2): 384 dimensions\n"
+                 f" - azure (text-embedding-3-small): 1536 dimensions"
+             )
+
+         logger.info(
+             f"Embedding dimension validated: {provider_dim} "
+             f"(provider: {self.embedding_provider})"
+         )
 
      def invalidate_cache(
          self,
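Review note: `_validate_embedding_dimension` turns a class of failures that previously surfaced as backend errors on first upsert (or as silently meaningless similarity scores) into a fail-fast `ValueError` when the embedder is initialized. The guard reduces to one comparison; a standalone restatement using the dimensions the error message itself cites (384 for the local all-MiniLM-L6-v2 model, 1536 for Azure text-embedding-3-small):

    def check_dims(provider_dim: int, storage_dim: int) -> None:
        # Same comparison the new guard performs, minus the config hints.
        if provider_dim != storage_dim:
            raise ValueError(
                f"Embedding dimension mismatch: provider outputs {provider_dim}, "
                f"storage expects {storage_dim}"
            )

    check_dims(384, 384)     # local model against a 384-dim index: passes
    # check_dims(384, 1536)  # local model against a 1536-dim index: raises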
@@ -280,7 +418,9 @@
          self.scorer.weights = ScoringWeights(
              similarity=similarity if similarity is not None else current.similarity,
              recency=recency if recency is not None else current.recency,
-             success_rate=success_rate if success_rate is not None else current.success_rate,
+             success_rate=(
+                 success_rate if success_rate is not None else current.success_rate
+             ),
              confidence=confidence if confidence is not None else current.confidence,
          )
          # Clear cache since scoring changed
alma/retrieval/scoring.py CHANGED
@@ -5,11 +5,11 @@ Combines semantic similarity, recency, and success rate for optimal retrieval.
  """
 
  import math
- from datetime import datetime, timezone
- from typing import List, Dict, Any, Optional, TypeVar, Callable
  from dataclasses import dataclass
+ from datetime import datetime, timezone
+ from typing import Any, List, Optional
 
- from alma.types import Heuristic, Outcome, DomainKnowledge, AntiPattern
+ from alma.types import AntiPattern, DomainKnowledge, Heuristic, Outcome
 
 
  @dataclass
@@ -19,10 +19,11 @@ class ScoringWeights:
 
      All weights should sum to 1.0 for normalized scores.
      """
-     similarity: float = 0.4    # Semantic relevance to query
-     recency: float = 0.3       # How recently the memory was validated/used
-     success_rate: float = 0.2  # Historical success rate
-     confidence: float = 0.1    # Stored confidence score
+
+     similarity: float = 0.4  # Semantic relevance to query
+     recency: float = 0.3  # How recently the memory was validated/used
+     success_rate: float = 0.2  # Historical success rate
+     confidence: float = 0.1  # Stored confidence score
 
      def __post_init__(self):
          """Validate weights sum to approximately 1.0."""
@@ -38,6 +39,7 @@
  @dataclass
  class ScoredItem:
      """A memory item with its computed score."""
+
      item: Any
      score: float
      similarity_score: float
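Review note: every scoring loop in the hunks below gains `strict=False` on its `zip(...)` call. The `strict` keyword exists only on Python 3.10+; passing `strict=False` preserves the historical silent-truncation behavior while making that choice explicit to linters that flag bare `zip`. The difference in one line:

    list(zip([1, 2, 3], ["a", "b"]))                 # [(1, 'a'), (2, 'b')] - quietly truncates
    # list(zip([1, 2, 3], ["a", "b"], strict=True))  # would raise ValueError instead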
@@ -93,26 +95,28 @@ class MemoryScorer:
          similarities = similarities or [1.0] * len(heuristics)
          scored = []
 
-         for h, sim in zip(heuristics, similarities):
+         for h, sim in zip(heuristics, similarities, strict=False):
              recency = self._compute_recency_score(h.last_validated)
              success = h.success_rate
              confidence = h.confidence
 
              total = (
-                 self.weights.similarity * sim +
-                 self.weights.recency * recency +
-                 self.weights.success_rate * success +
-                 self.weights.confidence * confidence
+                 self.weights.similarity * sim
+                 + self.weights.recency * recency
+                 + self.weights.success_rate * success
+                 + self.weights.confidence * confidence
              )
 
-             scored.append(ScoredItem(
-                 item=h,
-                 score=total,
-                 similarity_score=sim,
-                 recency_score=recency,
-                 success_score=success,
-                 confidence_score=confidence,
-             ))
+             scored.append(
+                 ScoredItem(
+                     item=h,
+                     score=total,
+                     similarity_score=sim,
+                     recency_score=recency,
+                     success_score=success,
+                     confidence_score=confidence,
+                 )
+             )
 
          return sorted(scored, key=lambda x: -x.score)
@@ -140,7 +144,7 @@
          similarities = similarities or [1.0] * len(outcomes)
          scored = []
 
-         for o, sim in zip(outcomes, similarities):
+         for o, sim in zip(outcomes, similarities, strict=False):
              recency = self._compute_recency_score(o.timestamp)
              # Success gets full score, failure gets partial (still useful to learn from)
              success = 1.0 if o.success else 0.3
@@ -148,20 +152,22 @@
              confidence = 1.0
 
              total = (
-                 self.weights.similarity * sim +
-                 self.weights.recency * recency +
-                 self.weights.success_rate * success +
-                 self.weights.confidence * confidence
+                 self.weights.similarity * sim
+                 + self.weights.recency * recency
+                 + self.weights.success_rate * success
+                 + self.weights.confidence * confidence
              )
 
-             scored.append(ScoredItem(
-                 item=o,
-                 score=total,
-                 similarity_score=sim,
-                 recency_score=recency,
-                 success_score=success,
-                 confidence_score=confidence,
-             ))
+             scored.append(
+                 ScoredItem(
+                     item=o,
+                     score=total,
+                     similarity_score=sim,
+                     recency_score=recency,
+                     success_score=success,
+                     confidence_score=confidence,
+                 )
+             )
 
          return sorted(scored, key=lambda x: -x.score)
@@ -186,27 +192,29 @@
          similarities = similarities or [1.0] * len(knowledge)
          scored = []
 
-         for dk, sim in zip(knowledge, similarities):
+         for dk, sim in zip(knowledge, similarities, strict=False):
              recency = self._compute_recency_score(dk.last_verified)
              # Knowledge doesn't have success rate, use 1.0
              success = 1.0
              confidence = dk.confidence
 
              total = (
-                 self.weights.similarity * sim +
-                 self.weights.recency * recency +
-                 self.weights.success_rate * success +
-                 self.weights.confidence * confidence
+                 self.weights.similarity * sim
+                 + self.weights.recency * recency
+                 + self.weights.success_rate * success
+                 + self.weights.confidence * confidence
              )
 
-             scored.append(ScoredItem(
-                 item=dk,
-                 score=total,
-                 similarity_score=sim,
-                 recency_score=recency,
-                 success_score=success,
-                 confidence_score=confidence,
-             ))
+             scored.append(
+                 ScoredItem(
+                     item=dk,
+                     score=total,
+                     similarity_score=sim,
+                     recency_score=recency,
+                     success_score=success,
+                     confidence_score=confidence,
+                 )
+             )
 
          return sorted(scored, key=lambda x: -x.score)
@@ -233,7 +241,7 @@
          similarities = similarities or [1.0] * len(anti_patterns)
          scored = []
 
-         for ap, sim in zip(anti_patterns, similarities):
+         for ap, sim in zip(anti_patterns, similarities, strict=False):
              recency = self._compute_recency_score(ap.last_seen)
              # More occurrences = more important to avoid
              # Normalize occurrence count (cap at 10 for scoring)
@@ -241,20 +249,22 @@
              confidence = 1.0
 
              total = (
-                 self.weights.similarity * sim +
-                 self.weights.recency * recency +
-                 self.weights.success_rate * success +
-                 self.weights.confidence * confidence
+                 self.weights.similarity * sim
+                 + self.weights.recency * recency
+                 + self.weights.success_rate * success
+                 + self.weights.confidence * confidence
              )
 
-             scored.append(ScoredItem(
-                 item=ap,
-                 score=total,
-                 similarity_score=sim,
-                 recency_score=recency,
-                 success_score=success,
-                 confidence_score=confidence,
-             ))
+             scored.append(
+                 ScoredItem(
+                     item=ap,
+                     score=total,
+                     similarity_score=sim,
+                     recency_score=recency,
+                     success_score=success,
+                     confidence_score=confidence,
+                 )
+             )
 
          return sorted(scored, key=lambda x: -x.score)
@@ -327,8 +337,8 @@ def compute_composite_score(
      recency_score = math.pow(0.5, recency_days / recency_half_life)
 
      return (
-         weights.similarity * similarity +
-         weights.recency * recency_score +
-         weights.success_rate * success_rate +
-         weights.confidence * confidence
+         weights.similarity * similarity
+         + weights.recency * recency_score
+         + weights.success_rate * success_rate
+         + weights.confidence * confidence
      )
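Review note: the reflowed return is behavior-preserving; the interesting term is the recency factor computed just above it, `math.pow(0.5, recency_days / recency_half_life)`, an exponential half-life decay. A worked example with an illustrative 7-day half-life (the real value comes from the function's `recency_half_life` parameter, not a documented default):

    import math

    recency_half_life = 7.0  # days; illustrative value

    for recency_days in (0, 7, 14, 28):
        print(recency_days, math.pow(0.5, recency_days / recency_half_life))
    # 0 -> 1.0, 7 -> 0.5, 14 -> 0.25, 28 -> 0.0625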
alma/session/__init__.py CHANGED
@@ -4,12 +4,12 @@ ALMA Session Management Module.
  Handles session continuity, handoffs, and quick context reload.
  """
 
+ from alma.session.manager import SessionManager
  from alma.session.types import (
-     SessionHandoff,
      SessionContext,
+     SessionHandoff,
      SessionOutcome,
  )
- from alma.session.manager import SessionManager
 
  __all__ = [
      "SessionHandoff",