alma-memory 0.3.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. alma/__init__.py +99 -29
  2. alma/confidence/__init__.py +47 -0
  3. alma/confidence/engine.py +540 -0
  4. alma/confidence/types.py +351 -0
  5. alma/config/loader.py +3 -2
  6. alma/consolidation/__init__.py +23 -0
  7. alma/consolidation/engine.py +678 -0
  8. alma/consolidation/prompts.py +84 -0
  9. alma/core.py +15 -15
  10. alma/domains/__init__.py +6 -6
  11. alma/domains/factory.py +12 -9
  12. alma/domains/schemas.py +17 -3
  13. alma/domains/types.py +8 -4
  14. alma/events/__init__.py +75 -0
  15. alma/events/emitter.py +284 -0
  16. alma/events/storage_mixin.py +246 -0
  17. alma/events/types.py +126 -0
  18. alma/events/webhook.py +425 -0
  19. alma/exceptions.py +49 -0
  20. alma/extraction/__init__.py +31 -0
  21. alma/extraction/auto_learner.py +264 -0
  22. alma/extraction/extractor.py +420 -0
  23. alma/graph/__init__.py +81 -0
  24. alma/graph/backends/__init__.py +18 -0
  25. alma/graph/backends/memory.py +236 -0
  26. alma/graph/backends/neo4j.py +417 -0
  27. alma/graph/base.py +159 -0
  28. alma/graph/extraction.py +198 -0
  29. alma/graph/store.py +860 -0
  30. alma/harness/__init__.py +4 -4
  31. alma/harness/base.py +18 -9
  32. alma/harness/domains.py +27 -11
  33. alma/initializer/__init__.py +37 -0
  34. alma/initializer/initializer.py +418 -0
  35. alma/initializer/types.py +250 -0
  36. alma/integration/__init__.py +9 -9
  37. alma/integration/claude_agents.py +10 -10
  38. alma/integration/helena.py +32 -22
  39. alma/integration/victor.py +57 -33
  40. alma/learning/__init__.py +27 -27
  41. alma/learning/forgetting.py +198 -148
  42. alma/learning/heuristic_extractor.py +40 -24
  43. alma/learning/protocols.py +62 -14
  44. alma/learning/validation.py +7 -2
  45. alma/mcp/__init__.py +4 -4
  46. alma/mcp/__main__.py +2 -1
  47. alma/mcp/resources.py +17 -16
  48. alma/mcp/server.py +102 -44
  49. alma/mcp/tools.py +174 -37
  50. alma/progress/__init__.py +3 -3
  51. alma/progress/tracker.py +26 -20
  52. alma/progress/types.py +8 -12
  53. alma/py.typed +0 -0
  54. alma/retrieval/__init__.py +11 -11
  55. alma/retrieval/cache.py +20 -21
  56. alma/retrieval/embeddings.py +4 -4
  57. alma/retrieval/engine.py +114 -35
  58. alma/retrieval/scoring.py +73 -63
  59. alma/session/__init__.py +2 -2
  60. alma/session/manager.py +5 -5
  61. alma/session/types.py +5 -4
  62. alma/storage/__init__.py +41 -0
  63. alma/storage/azure_cosmos.py +107 -31
  64. alma/storage/base.py +157 -4
  65. alma/storage/chroma.py +1443 -0
  66. alma/storage/file_based.py +56 -20
  67. alma/storage/pinecone.py +1080 -0
  68. alma/storage/postgresql.py +1452 -0
  69. alma/storage/qdrant.py +1306 -0
  70. alma/storage/sqlite_local.py +376 -31
  71. alma/types.py +62 -14
  72. alma_memory-0.5.0.dist-info/METADATA +905 -0
  73. alma_memory-0.5.0.dist-info/RECORD +76 -0
  74. {alma_memory-0.3.0.dist-info → alma_memory-0.5.0.dist-info}/WHEEL +1 -1
  75. alma_memory-0.3.0.dist-info/METADATA +0 -438
  76. alma_memory-0.3.0.dist-info/RECORD +0 -46
  77. {alma_memory-0.3.0.dist-info → alma_memory-0.5.0.dist-info}/top_level.txt +0 -0
alma/retrieval/engine.py CHANGED
@@ -4,14 +4,14 @@ ALMA Retrieval Engine.
 Handles semantic search and memory retrieval with scoring and caching.
 """
 
-import time
 import logging
-from typing import Optional, List, Dict, Any
+import time
+from typing import Any, Dict, List, Optional
 
-from alma.types import MemorySlice, MemoryScope
+from alma.retrieval.cache import NullCache, RetrievalCache
+from alma.retrieval.scoring import MemoryScorer, ScoredItem, ScoringWeights
 from alma.storage.base import StorageBackend
-from alma.retrieval.scoring import MemoryScorer, ScoringWeights, ScoredItem
-from alma.retrieval.cache import RetrievalCache, NullCache
+from alma.types import MemoryScope, MemorySlice
 
 logger = logging.getLogger(__name__)
 
@@ -81,18 +81,24 @@ class RetrievalEngine:
         top_k: int = 5,
         scope: Optional[MemoryScope] = None,
         bypass_cache: bool = False,
+        include_shared: bool = True,
     ) -> MemorySlice:
         """
         Retrieve relevant memories for a task.
 
+        Supports multi-agent memory sharing: if a scope is provided with
+        inherit_from agents, memories from those agents will also be included.
+        Shared memories have their origin tracked in the metadata['shared_from'] field.
+
         Args:
             query: Task description to find relevant memories for
             agent: Agent requesting memories
             project_id: Project context
            user_id: Optional user for preference retrieval
            top_k: Max items per memory type
-            scope: Agent's learning scope for filtering
+            scope: Agent's learning scope for filtering (enables multi-agent sharing)
             bypass_cache: Skip cache lookup/storage
+            include_shared: If True and scope has inherit_from, include shared memories
 
         Returns:
             MemorySlice with relevant memories, scored and ranked
@@ -110,36 +116,79 @@
         # Generate embedding for query
         query_embedding = self._get_embedding(query)
 
-        # Retrieve raw items from storage (with vector search)
-        raw_heuristics = self.storage.get_heuristics(
-            project_id=project_id,
-            agent=agent,
-            embedding=query_embedding,
-            top_k=top_k * 2,  # Get extra for scoring/filtering
-            min_confidence=0.0,  # Let scorer handle filtering
-        )
-
-        raw_outcomes = self.storage.get_outcomes(
-            project_id=project_id,
-            agent=agent,
-            embedding=query_embedding,
-            top_k=top_k * 2,
-            success_only=False,
-        )
+        # Determine which agents to query based on scope
+        agents_to_query = [agent]
+        if include_shared and scope and scope.inherit_from:
+            agents_to_query = scope.get_readable_agents()
+            logger.debug(
+                f"Multi-agent retrieval for {agent}: querying {agents_to_query}"
+            )
 
-        raw_domain_knowledge = self.storage.get_domain_knowledge(
-            project_id=project_id,
-            agent=agent,
-            embedding=query_embedding,
-            top_k=top_k * 2,
-        )
+        # Retrieve raw items from storage (with vector search)
+        if len(agents_to_query) > 1:
+            # Use multi-agent query methods
+            raw_heuristics = self.storage.get_heuristics_for_agents(
+                project_id=project_id,
+                agents=agents_to_query,
+                embedding=query_embedding,
+                top_k=top_k * 2,
+                min_confidence=0.0,
+            )
+            raw_outcomes = self.storage.get_outcomes_for_agents(
+                project_id=project_id,
+                agents=agents_to_query,
+                embedding=query_embedding,
+                top_k=top_k * 2,
+                success_only=False,
+            )
+            raw_domain_knowledge = self.storage.get_domain_knowledge_for_agents(
+                project_id=project_id,
+                agents=agents_to_query,
+                embedding=query_embedding,
+                top_k=top_k * 2,
+            )
+            raw_anti_patterns = self.storage.get_anti_patterns_for_agents(
+                project_id=project_id,
+                agents=agents_to_query,
+                embedding=query_embedding,
+                top_k=top_k * 2,
+            )
 
-        raw_anti_patterns = self.storage.get_anti_patterns(
-            project_id=project_id,
-            agent=agent,
-            embedding=query_embedding,
-            top_k=top_k * 2,
-        )
+            # Mark shared memories with origin tracking
+            raw_heuristics = self._mark_shared_memories(raw_heuristics, agent)
+            raw_outcomes = self._mark_shared_memories(raw_outcomes, agent)
+            raw_domain_knowledge = self._mark_shared_memories(
+                raw_domain_knowledge, agent
+            )
+            raw_anti_patterns = self._mark_shared_memories(raw_anti_patterns, agent)
+        else:
+            # Single agent query (original behavior)
+            raw_heuristics = self.storage.get_heuristics(
+                project_id=project_id,
+                agent=agent,
+                embedding=query_embedding,
+                top_k=top_k * 2,
+                min_confidence=0.0,
+            )
+            raw_outcomes = self.storage.get_outcomes(
+                project_id=project_id,
+                agent=agent,
+                embedding=query_embedding,
+                top_k=top_k * 2,
+                success_only=False,
+            )
+            raw_domain_knowledge = self.storage.get_domain_knowledge(
+                project_id=project_id,
+                agent=agent,
+                embedding=query_embedding,
+                top_k=top_k * 2,
+            )
+            raw_anti_patterns = self.storage.get_anti_patterns(
+                project_id=project_id,
+                agent=agent,
+                embedding=query_embedding,
+                top_k=top_k * 2,
+            )
 
         # Score and rank each type
         scored_heuristics = self.scorer.score_heuristics(raw_heuristics)
@@ -182,6 +231,31 @@
 
         return result
 
+    def _mark_shared_memories(
+        self,
+        memories: List[Any],
+        requesting_agent: str,
+    ) -> List[Any]:
+        """
+        Mark memories that came from other agents with their origin.
+
+        Adds 'shared_from' to metadata for memories not owned by requesting_agent.
+        This maintains write isolation - only the owning agent can modify their memories.
+
+        Args:
+            memories: List of memory objects (Heuristic, Outcome, etc.)
+            requesting_agent: The agent that requested the memories
+
+        Returns:
+            Same memories with shared_from metadata added where applicable
+        """
+        for memory in memories:
+            if hasattr(memory, "agent") and memory.agent != requesting_agent:
+                if not hasattr(memory, "metadata") or memory.metadata is None:
+                    memory.metadata = {}
+                memory.metadata["shared_from"] = memory.agent
+        return memories
+
     def _extract_top_k(
         self,
         scored_items: List[ScoredItem],
@@ -217,12 +291,15 @@
         """Initialize the embedding model based on provider config."""
         if self.embedding_provider == "azure":
             from alma.retrieval.embeddings import AzureEmbedder
+
             return AzureEmbedder()
         elif self.embedding_provider == "mock":
             from alma.retrieval.embeddings import MockEmbedder
+
             return MockEmbedder()
         else:
             from alma.retrieval.embeddings import LocalEmbedder
+
             return LocalEmbedder()
 
     def invalidate_cache(
@@ -280,7 +357,9 @@
         self.scorer.weights = ScoringWeights(
             similarity=similarity if similarity is not None else current.similarity,
             recency=recency if recency is not None else current.recency,
-            success_rate=success_rate if success_rate is not None else current.success_rate,
+            success_rate=(
+                success_rate if success_rate is not None else current.success_rate
+            ),
             confidence=confidence if confidence is not None else current.confidence,
         )
         # Clear cache since scoring changed
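The substance of this file's change is scope-driven fan-out: when an agent's MemoryScope lists inherit_from agents, retrieve() queries the *_for_agents storage methods and stamps foreign results with metadata["shared_from"], while writes stay isolated to the owning agent. A minimal usage sketch follows; only retrieve()'s signature is confirmed by the diff, so the MemoryScope constructor arguments and the MemorySlice.heuristics attribute are assumptions for illustration:

```python
# Hypothetical usage sketch - MemoryScope/MemorySlice field names are
# assumed, not confirmed by this diff.
from alma.types import MemoryScope

scope = MemoryScope(agent="reviewer", inherit_from=["planner", "coder"])

memories = engine.retrieve(      # engine: a configured RetrievalEngine
    query="refactor the payment webhook handler",
    agent="reviewer",
    project_id="acme-shop",
    scope=scope,                 # triggers scope.get_readable_agents() fan-out
    include_shared=True,         # default; False forces single-agent behavior
)

# Foreign memories carry their origin; only the owner may modify them.
for h in memories.heuristics:    # assumed MemorySlice attribute
    origin = (h.metadata or {}).get("shared_from")
    if origin:
        print(f"heuristic inherited from {origin}")
```

Tracking the origin in metadata rather than copying records keeps each shared memory a single source of truth owned by one agent, which is what the _mark_shared_memories docstring means by write isolation.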
alma/retrieval/scoring.py CHANGED
@@ -5,11 +5,11 @@ Combines semantic similarity, recency, and success rate for optimal retrieval.
 """
 
 import math
-from datetime import datetime, timezone
-from typing import List, Dict, Any, Optional, TypeVar, Callable
 from dataclasses import dataclass
+from datetime import datetime, timezone
+from typing import Any, List, Optional
 
-from alma.types import Heuristic, Outcome, DomainKnowledge, AntiPattern
+from alma.types import AntiPattern, DomainKnowledge, Heuristic, Outcome
 
 
 @dataclass
@@ -19,10 +19,11 @@ class ScoringWeights:
 
     All weights should sum to 1.0 for normalized scores.
     """
-    similarity: float = 0.4  # Semantic relevance to query
-    recency: float = 0.3  # How recently the memory was validated/used
-    success_rate: float = 0.2  # Historical success rate
-    confidence: float = 0.1  # Stored confidence score
+
+    similarity: float = 0.4  # Semantic relevance to query
+    recency: float = 0.3  # How recently the memory was validated/used
+    success_rate: float = 0.2  # Historical success rate
+    confidence: float = 0.1  # Stored confidence score
 
     def __post_init__(self):
         """Validate weights sum to approximately 1.0."""
@@ -38,6 +39,7 @@
 @dataclass
 class ScoredItem:
     """A memory item with its computed score."""
+
     item: Any
     score: float
     similarity_score: float
@@ -93,26 +95,28 @@ class MemoryScorer:
         similarities = similarities or [1.0] * len(heuristics)
         scored = []
 
-        for h, sim in zip(heuristics, similarities):
+        for h, sim in zip(heuristics, similarities, strict=False):
             recency = self._compute_recency_score(h.last_validated)
             success = h.success_rate
             confidence = h.confidence
 
             total = (
-                self.weights.similarity * sim +
-                self.weights.recency * recency +
-                self.weights.success_rate * success +
-                self.weights.confidence * confidence
+                self.weights.similarity * sim
+                + self.weights.recency * recency
+                + self.weights.success_rate * success
+                + self.weights.confidence * confidence
             )
 
-            scored.append(ScoredItem(
-                item=h,
-                score=total,
-                similarity_score=sim,
-                recency_score=recency,
-                success_score=success,
-                confidence_score=confidence,
-            ))
+            scored.append(
+                ScoredItem(
+                    item=h,
+                    score=total,
+                    similarity_score=sim,
+                    recency_score=recency,
+                    success_score=success,
+                    confidence_score=confidence,
+                )
+            )
 
         return sorted(scored, key=lambda x: -x.score)
 
@@ -140,7 +144,7 @@
         similarities = similarities or [1.0] * len(outcomes)
         scored = []
 
-        for o, sim in zip(outcomes, similarities):
+        for o, sim in zip(outcomes, similarities, strict=False):
             recency = self._compute_recency_score(o.timestamp)
             # Success gets full score, failure gets partial (still useful to learn from)
             success = 1.0 if o.success else 0.3
@@ -148,20 +152,22 @@
             confidence = 1.0
 
             total = (
-                self.weights.similarity * sim +
-                self.weights.recency * recency +
-                self.weights.success_rate * success +
-                self.weights.confidence * confidence
+                self.weights.similarity * sim
+                + self.weights.recency * recency
+                + self.weights.success_rate * success
+                + self.weights.confidence * confidence
             )
 
-            scored.append(ScoredItem(
-                item=o,
-                score=total,
-                similarity_score=sim,
-                recency_score=recency,
-                success_score=success,
-                confidence_score=confidence,
-            ))
+            scored.append(
+                ScoredItem(
+                    item=o,
+                    score=total,
+                    similarity_score=sim,
+                    recency_score=recency,
+                    success_score=success,
+                    confidence_score=confidence,
+                )
+            )
 
         return sorted(scored, key=lambda x: -x.score)
 
@@ -186,27 +192,29 @@
         similarities = similarities or [1.0] * len(knowledge)
         scored = []
 
-        for dk, sim in zip(knowledge, similarities):
+        for dk, sim in zip(knowledge, similarities, strict=False):
             recency = self._compute_recency_score(dk.last_verified)
             # Knowledge doesn't have success rate, use 1.0
             success = 1.0
             confidence = dk.confidence
 
             total = (
-                self.weights.similarity * sim +
-                self.weights.recency * recency +
-                self.weights.success_rate * success +
-                self.weights.confidence * confidence
+                self.weights.similarity * sim
+                + self.weights.recency * recency
+                + self.weights.success_rate * success
+                + self.weights.confidence * confidence
             )
 
-            scored.append(ScoredItem(
-                item=dk,
-                score=total,
-                similarity_score=sim,
-                recency_score=recency,
-                success_score=success,
-                confidence_score=confidence,
-            ))
+            scored.append(
+                ScoredItem(
+                    item=dk,
+                    score=total,
+                    similarity_score=sim,
+                    recency_score=recency,
+                    success_score=success,
+                    confidence_score=confidence,
+                )
+            )
 
         return sorted(scored, key=lambda x: -x.score)
 
@@ -233,7 +241,7 @@
         similarities = similarities or [1.0] * len(anti_patterns)
         scored = []
 
-        for ap, sim in zip(anti_patterns, similarities):
+        for ap, sim in zip(anti_patterns, similarities, strict=False):
             recency = self._compute_recency_score(ap.last_seen)
             # More occurrences = more important to avoid
             # Normalize occurrence count (cap at 10 for scoring)
@@ -241,20 +249,22 @@
             confidence = 1.0
 
             total = (
-                self.weights.similarity * sim +
-                self.weights.recency * recency +
-                self.weights.success_rate * success +
-                self.weights.confidence * confidence
+                self.weights.similarity * sim
+                + self.weights.recency * recency
+                + self.weights.success_rate * success
+                + self.weights.confidence * confidence
             )
 
-            scored.append(ScoredItem(
-                item=ap,
-                score=total,
-                similarity_score=sim,
-                recency_score=recency,
-                success_score=success,
-                confidence_score=confidence,
-            ))
+            scored.append(
+                ScoredItem(
+                    item=ap,
+                    score=total,
+                    similarity_score=sim,
+                    recency_score=recency,
+                    success_score=success,
+                    confidence_score=confidence,
+                )
+            )
 
         return sorted(scored, key=lambda x: -x.score)
 
@@ -327,8 +337,8 @@ def compute_composite_score(
     recency_score = math.pow(0.5, recency_days / recency_half_life)
 
     return (
-        weights.similarity * similarity +
-        weights.recency * recency_score +
-        weights.success_rate * success_rate +
-        weights.confidence * confidence
+        weights.similarity * similarity
+        + weights.recency * recency_score
+        + weights.success_rate * success_rate
+        + weights.confidence * confidence
    )
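Every scorer in this file computes the same weighted sum, with recency decayed exponentially by half-life. A worked example with the default weights shown in the diff (0.4 / 0.3 / 0.2 / 0.1); the 30-day half-life is an assumed value for illustration, since the default is not visible here:

```python
import math

# Default ScoringWeights: similarity 0.4, recency 0.3, success 0.2, confidence 0.1
similarity, success_rate, confidence = 0.82, 0.75, 0.9
recency_days, half_life = 30, 30  # half_life is an assumed value

# Exponential decay from compute_composite_score: 0.5 after one half-life
recency = math.pow(0.5, recency_days / half_life)

score = 0.4 * similarity + 0.3 * recency + 0.2 * success_rate + 0.1 * confidence
print(f"{score:.3f}")  # 0.328 + 0.150 + 0.150 + 0.090 = 0.718
```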
alma/session/__init__.py CHANGED
@@ -4,12 +4,12 @@ ALMA Session Management Module.
 Handles session continuity, handoffs, and quick context reload.
 """
 
+from alma.session.manager import SessionManager
 from alma.session.types import (
-    SessionHandoff,
     SessionContext,
+    SessionHandoff,
     SessionOutcome,
 )
-from alma.session.manager import SessionManager
 
 __all__ = [
     "SessionHandoff",
alma/session/manager.py CHANGED
@@ -4,14 +4,14 @@ Session Manager.
 Manages session continuity, handoffs, and quick context reload.
 """
 
-from datetime import datetime, timezone
-from typing import Optional, List, Dict, Any, Callable
-import uuid
 import logging
+import uuid
+from datetime import datetime, timezone
+from typing import Any, Callable, Dict, List, Optional
 
 from alma.session.types import (
-    SessionHandoff,
     SessionContext,
+    SessionHandoff,
     SessionOutcome,
 )
 
@@ -281,7 +281,7 @@ class SessionManager:
 
         # Trim to max
         if len(self._handoffs[agent]) > self.max_handoffs:
-            self._handoffs[agent] = self._handoffs[agent][-self.max_handoffs:]
+            self._handoffs[agent] = self._handoffs[agent][-self.max_handoffs :]
 
         # TODO: Persist to storage backend when integrated
 
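The SessionManager hunk is a formatter-only change (a space inserted before the slice colon, likely by an automated formatter), but the underlying idiom is worth spelling out: a negative slice keeps only the newest max_handoffs entries. A self-contained illustration with an arbitrary cap:

```python
max_handoffs = 3  # illustrative cap, not the library default
handoffs = ["h1", "h2", "h3", "h4", "h5"]  # oldest -> newest

# Keep only the newest `max_handoffs` entries, dropping the oldest.
if len(handoffs) > max_handoffs:
    handoffs = handoffs[-max_handoffs:]
print(handoffs)  # ['h3', 'h4', 'h5']
```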
alma/session/types.py CHANGED
@@ -4,11 +4,10 @@ Session Management Types.
 Data models for session continuity and handoffs.
 """
 
+import uuid
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
-from typing import Optional, List, Dict, Any, Literal
-import uuid
-
+from typing import Any, Dict, List, Literal, Optional
 
 SessionOutcome = Literal["success", "failure", "interrupted", "unknown"]
 
@@ -276,7 +275,9 @@ class SessionContext:
         if "branch" in self.codebase_state:
             lines.append(f"Branch: {self.codebase_state['branch']}")
         if "uncommitted" in self.codebase_state:
-            lines.append(f"Uncommitted Changes: {self.codebase_state['uncommitted']}")
+            lines.append(
+                f"Uncommitted Changes: {self.codebase_state['uncommitted']}"
+            )
 
         # Rules
         if self.rules_of_engagement:
alma/storage/__init__.py CHANGED
@@ -7,14 +7,55 @@ from alma.storage.sqlite_local import SQLiteStorage
 # Azure Cosmos DB is optional - requires azure-cosmos package
 try:
     from alma.storage.azure_cosmos import AzureCosmosStorage
+
     _HAS_AZURE = True
 except ImportError:
     AzureCosmosStorage = None  # type: ignore
     _HAS_AZURE = False
 
+# PostgreSQL is optional - requires psycopg package
+try:
+    from alma.storage.postgresql import PostgreSQLStorage
+
+    _HAS_POSTGRES = True
+except ImportError:
+    PostgreSQLStorage = None  # type: ignore
+    _HAS_POSTGRES = False
+
+# Qdrant is optional - requires qdrant-client package
+try:
+    from alma.storage.qdrant import QdrantStorage
+
+    _HAS_QDRANT = True
+except ImportError:
+    QdrantStorage = None  # type: ignore
+    _HAS_QDRANT = False
+
+# ChromaDB is optional - requires chromadb package
+try:
+    from alma.storage.chroma import ChromaStorage
+
+    _HAS_CHROMA = True
+except ImportError:
+    ChromaStorage = None  # type: ignore
+    _HAS_CHROMA = False
+
+# Pinecone is optional - requires pinecone-client package
+try:
+    from alma.storage.pinecone import PineconeStorage
+
+    _HAS_PINECONE = True
+except ImportError:
+    PineconeStorage = None  # type: ignore
+    _HAS_PINECONE = False
+
 __all__ = [
     "StorageBackend",
     "FileBasedStorage",
     "SQLiteStorage",
     "AzureCosmosStorage",
+    "PostgreSQLStorage",
+    "QdrantStorage",
+    "ChromaStorage",
+    "PineconeStorage",
 ]
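All four new backends reuse the azure-cosmos guard already in place: import the class if its optional dependency is installed, otherwise bind the name to None and record a _HAS_* flag. Since the flags are module-private, downstream code can feature-detect by checking the exported names against None, as in this minimal sketch (constructor arguments omitted because they are not shown in this diff):

```python
# Pick the first storage backend whose optional dependency is installed,
# mirroring the module's own None-binding guards.
from alma import storage

def first_available():
    for cls in (
        storage.PostgreSQLStorage,  # needs psycopg
        storage.QdrantStorage,      # needs qdrant-client
        storage.ChromaStorage,      # needs chromadb
        storage.PineconeStorage,    # needs pinecone-client
    ):
        if cls is not None:
            return cls
    return storage.SQLiteStorage    # bundled fallback, always importable

backend_cls = first_available()
print(backend_cls.__name__)
```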