alma-memory 0.3.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77) hide show
  1. alma/__init__.py +99 -29
  2. alma/confidence/__init__.py +47 -0
  3. alma/confidence/engine.py +540 -0
  4. alma/confidence/types.py +351 -0
  5. alma/config/loader.py +3 -2
  6. alma/consolidation/__init__.py +23 -0
  7. alma/consolidation/engine.py +678 -0
  8. alma/consolidation/prompts.py +84 -0
  9. alma/core.py +15 -15
  10. alma/domains/__init__.py +6 -6
  11. alma/domains/factory.py +12 -9
  12. alma/domains/schemas.py +17 -3
  13. alma/domains/types.py +8 -4
  14. alma/events/__init__.py +75 -0
  15. alma/events/emitter.py +284 -0
  16. alma/events/storage_mixin.py +246 -0
  17. alma/events/types.py +126 -0
  18. alma/events/webhook.py +425 -0
  19. alma/exceptions.py +49 -0
  20. alma/extraction/__init__.py +31 -0
  21. alma/extraction/auto_learner.py +264 -0
  22. alma/extraction/extractor.py +420 -0
  23. alma/graph/__init__.py +81 -0
  24. alma/graph/backends/__init__.py +18 -0
  25. alma/graph/backends/memory.py +236 -0
  26. alma/graph/backends/neo4j.py +417 -0
  27. alma/graph/base.py +159 -0
  28. alma/graph/extraction.py +198 -0
  29. alma/graph/store.py +860 -0
  30. alma/harness/__init__.py +4 -4
  31. alma/harness/base.py +18 -9
  32. alma/harness/domains.py +27 -11
  33. alma/initializer/__init__.py +37 -0
  34. alma/initializer/initializer.py +418 -0
  35. alma/initializer/types.py +250 -0
  36. alma/integration/__init__.py +9 -9
  37. alma/integration/claude_agents.py +10 -10
  38. alma/integration/helena.py +32 -22
  39. alma/integration/victor.py +57 -33
  40. alma/learning/__init__.py +27 -27
  41. alma/learning/forgetting.py +198 -148
  42. alma/learning/heuristic_extractor.py +40 -24
  43. alma/learning/protocols.py +62 -14
  44. alma/learning/validation.py +7 -2
  45. alma/mcp/__init__.py +4 -4
  46. alma/mcp/__main__.py +2 -1
  47. alma/mcp/resources.py +17 -16
  48. alma/mcp/server.py +102 -44
  49. alma/mcp/tools.py +174 -37
  50. alma/progress/__init__.py +3 -3
  51. alma/progress/tracker.py +26 -20
  52. alma/progress/types.py +8 -12
  53. alma/py.typed +0 -0
  54. alma/retrieval/__init__.py +11 -11
  55. alma/retrieval/cache.py +20 -21
  56. alma/retrieval/embeddings.py +4 -4
  57. alma/retrieval/engine.py +114 -35
  58. alma/retrieval/scoring.py +73 -63
  59. alma/session/__init__.py +2 -2
  60. alma/session/manager.py +5 -5
  61. alma/session/types.py +5 -4
  62. alma/storage/__init__.py +41 -0
  63. alma/storage/azure_cosmos.py +107 -31
  64. alma/storage/base.py +157 -4
  65. alma/storage/chroma.py +1443 -0
  66. alma/storage/file_based.py +56 -20
  67. alma/storage/pinecone.py +1080 -0
  68. alma/storage/postgresql.py +1452 -0
  69. alma/storage/qdrant.py +1306 -0
  70. alma/storage/sqlite_local.py +376 -31
  71. alma/types.py +62 -14
  72. alma_memory-0.5.0.dist-info/METADATA +905 -0
  73. alma_memory-0.5.0.dist-info/RECORD +76 -0
  74. {alma_memory-0.3.0.dist-info → alma_memory-0.5.0.dist-info}/WHEEL +1 -1
  75. alma_memory-0.3.0.dist-info/METADATA +0 -438
  76. alma_memory-0.3.0.dist-info/RECORD +0 -46
  77. {alma_memory-0.3.0.dist-info → alma_memory-0.5.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,351 @@
1
+ """
2
+ Confidence Types.
3
+
4
+ Forward-looking confidence signals for strategies.
5
+ Inspired by Ilya Sutskever's insight: emotions are forward-looking value functions.
6
+ """
7
+
8
+ import uuid
9
+ from dataclasses import dataclass, field
10
+ from datetime import datetime, timezone
11
+ from typing import Any, Dict, List, Literal, Optional
12
+
13
+
14
@dataclass
class RiskSignal:
    """
    A single risk indicator attached to a strategy assessment.

    A risk flags evidence that the strategy may fail in the current
    context — e.g. resemblance to a past failure, an untested context,
    unusual complexity, or an unmet prerequisite.
    """

    # Unique identifier for this signal.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Category of the risk, e.g. "similar_to_failure",
    # "untested_context", "high_complexity".
    signal_type: str = ""

    # Human-readable explanation of the risk.
    description: str = ""

    # Severity in [0.0, 1.0]: 0.0 = low risk, 1.0 = critical risk.
    severity: float = 0.0

    # Origin of the signal, e.g. "heuristic:h123",
    # "anti_pattern:ap456", "context_analysis".
    source: str = ""

    # IDs of memories that support this signal.
    related_memories: List[str] = field(default_factory=list)

    # Free-form extra data, plus the UTC detection timestamp.
    metadata: Dict[str, Any] = field(default_factory=dict)
    detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-friendly dictionary representation."""
        return dict(
            id=self.id,
            signal_type=self.signal_type,
            description=self.description,
            severity=self.severity,
            source=self.source,
            related_memories=self.related_memories,
            metadata=self.metadata,
            detected_at=self.detected_at.isoformat(),
        )
62
+
63
+
64
@dataclass
class OpportunitySignal:
    """
    A single opportunity indicator attached to a strategy assessment.

    An opportunity flags evidence that the strategy is likely to
    succeed — e.g. a proven pattern with a high success rate, strong
    similarity to past successes, a recent successful use, or solid
    prerequisites already in place.
    """

    # Unique identifier for this signal.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Category of the opportunity, e.g. "proven_pattern",
    # "high_similarity", "recent_success".
    signal_type: str = ""

    # Human-readable explanation of the opportunity.
    description: str = ""

    # Strength in [0.0, 1.0]: 0.0 = weak signal, 1.0 = strong signal.
    strength: float = 0.0

    # Origin of the signal, e.g. "heuristic:h123", "outcome:o456",
    # "pattern_match".
    source: str = ""

    # IDs of memories that support this signal.
    related_memories: List[str] = field(default_factory=list)

    # Free-form extra data, plus the UTC detection timestamp.
    metadata: Dict[str, Any] = field(default_factory=dict)
    detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-friendly dictionary representation."""
        return dict(
            id=self.id,
            signal_type=self.signal_type,
            description=self.description,
            strength=self.strength,
            source=self.source,
            related_memories=self.related_memories,
            metadata=self.metadata,
            detected_at=self.detected_at.isoformat(),
        )
110
+
111
+
112
# Recommendation levels, ordered from most favorable ("strong_yes")
# to least favorable ("avoid").
Recommendation = Literal["strong_yes", "yes", "neutral", "caution", "avoid"]
114
+
115
+
116
@dataclass
class ConfidenceSignal:
    """
    Forward-looking confidence assessment for a strategy.

    Blends backward-looking evidence (historical success rate) with
    forward-looking predictions (expected success in the current
    context) into a single score and recommendation — the "gut
    feeling" an agent consults before committing to a strategy.
    """

    # Unique identifier for this assessment.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Subject of the assessment.
    strategy: str = ""
    context: str = ""
    agent: str = ""

    # Existing heuristic this assessment is tied to, if any.
    heuristic_id: Optional[str] = None

    # Backward-looking evidence derived from past outcomes.
    historical_success_rate: float = 0.0  # fraction of past successes (0-1)
    occurrence_count: int = 0  # number of recorded attempts

    # Forward-looking predictions computed for the current context.
    predicted_success: float = 0.5  # expected success in THIS context
    uncertainty: float = 0.5  # prediction uncertainty (0 = certain, 1 = very uncertain)
    context_similarity: float = 0.0  # similarity of current context to past successes

    # Collected risk evidence and its aggregate.
    risk_signals: List[RiskSignal] = field(default_factory=list)
    total_risk_score: float = 0.0  # 0 = no risk, 1 = high risk

    # Collected opportunity evidence and its aggregate.
    opportunity_signals: List[OpportunitySignal] = field(default_factory=list)
    total_opportunity_score: float = 0.0  # 0 = none, 1 = high

    # Final combined verdict.
    confidence_score: float = 0.5  # weighted score in [0, 1]
    recommendation: Recommendation = "neutral"

    # Free-text explanation of the verdict.
    reasoning: str = ""

    # Free-form extra data, plus the UTC assessment timestamp.
    metadata: Dict[str, Any] = field(default_factory=dict)
    assessed_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    @classmethod
    def create(
        cls,
        strategy: str,
        context: str,
        agent: str,
        heuristic_id: Optional[str] = None,
    ) -> "ConfidenceSignal":
        """Build a fresh assessment for the given strategy/context/agent."""
        return cls(
            strategy=strategy,
            context=context,
            agent=agent,
            heuristic_id=heuristic_id,
        )

    def add_risk(
        self,
        signal_type: str,
        description: str,
        severity: float,
        source: str = "",
    ) -> RiskSignal:
        """Record a risk signal and refresh the aggregate scores."""
        signal = RiskSignal(
            signal_type=signal_type,
            description=description,
            severity=severity,
            source=source,
        )
        self.risk_signals.append(signal)
        self._recalculate_scores()
        return signal

    def add_opportunity(
        self,
        signal_type: str,
        description: str,
        strength: float,
        source: str = "",
    ) -> OpportunitySignal:
        """Record an opportunity signal and refresh the aggregate scores."""
        signal = OpportunitySignal(
            signal_type=signal_type,
            description=description,
            strength=strength,
            source=source,
        )
        self.opportunity_signals.append(signal)
        self._recalculate_scores()
        return signal

    def _recalculate_scores(self) -> None:
        """Recompute aggregate risk/opportunity, confidence, and recommendation."""
        # The dominant (maximum) signal drives each aggregate; an empty
        # signal list contributes zero.
        self.total_risk_score = max(
            (r.severity for r in self.risk_signals), default=0.0
        )
        self.total_opportunity_score = max(
            (o.strength for o in self.opportunity_signals), default=0.0
        )

        # Weighted blend of history, prediction, similarity, and
        # certainty (one minus uncertainty)...
        base = (
            0.3 * self.historical_success_rate
            + 0.4 * self.predicted_success
            + 0.15 * self.context_similarity
            + 0.15 * (1.0 - self.uncertainty)
        )
        # ...nudged by the risk/opportunity balance, then clamped to [0, 1].
        adjusted = base + -0.2 * self.total_risk_score + 0.2 * self.total_opportunity_score
        self.confidence_score = max(0.0, min(1.0, adjusted))

        self._update_recommendation()

    def _update_recommendation(self) -> None:
        """Derive the recommendation label from risk and confidence."""
        # Severe risk overrides whatever the confidence score says.
        if self.total_risk_score >= 0.6:
            self.recommendation = (
                "avoid" if self.total_risk_score >= 0.8 else "caution"
            )
            return
        # Otherwise bucket the confidence score from best to worst.
        for floor, label in (
            (0.8, "strong_yes"),
            (0.6, "yes"),
            (0.4, "neutral"),
            (0.2, "caution"),
        ):
            if self.confidence_score >= floor:
                self.recommendation = label
                return
        self.recommendation = "avoid"

    def to_prompt(self) -> str:
        """Render the assessment as markdown for prompt injection."""
        out: List[str] = []
        out.append(f"## Confidence Assessment: {self.strategy[:50]}...")
        out.append(
            f"**Recommendation: {self.recommendation.upper()}**"
            f" (score: {self.confidence_score:.2f})"
        )
        out.append("")

        # Headline metrics.
        out.append("### Metrics")
        out.append(
            f"- Historical success: {self.historical_success_rate:.0%}"
            f" ({self.occurrence_count} uses)"
        )
        out.append(f"- Predicted success: {self.predicted_success:.0%}")
        out.append(f"- Context similarity: {self.context_similarity:.0%}")
        out.append(f"- Uncertainty: {self.uncertainty:.0%}")
        out.append("")

        # Risk section, one bullet per signal, labelled by severity band.
        if self.risk_signals:
            out.append("### Risks")
            for risk in self.risk_signals:
                if risk.severity >= 0.7:
                    band = "HIGH"
                elif risk.severity >= 0.4:
                    band = "MEDIUM"
                else:
                    band = "LOW"
                out.append(f"- [{band}] {risk.description}")
            out.append("")

        # Opportunity section, labelled by strength band.
        if self.opportunity_signals:
            out.append("### Opportunities")
            for opp in self.opportunity_signals:
                if opp.strength >= 0.7:
                    band = "STRONG"
                elif opp.strength >= 0.4:
                    band = "MODERATE"
                else:
                    band = "WEAK"
                out.append(f"- [{band}] {opp.description}")
            out.append("")

        # Free-text analysis, if any was recorded.
        if self.reasoning:
            out.append("### Analysis")
            out.append(self.reasoning)

        return "\n".join(out)

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-friendly dictionary representation."""
        return dict(
            id=self.id,
            strategy=self.strategy,
            context=self.context,
            agent=self.agent,
            heuristic_id=self.heuristic_id,
            historical_success_rate=self.historical_success_rate,
            occurrence_count=self.occurrence_count,
            predicted_success=self.predicted_success,
            uncertainty=self.uncertainty,
            context_similarity=self.context_similarity,
            risk_signals=[r.to_dict() for r in self.risk_signals],
            total_risk_score=self.total_risk_score,
            opportunity_signals=[o.to_dict() for o in self.opportunity_signals],
            total_opportunity_score=self.total_opportunity_score,
            confidence_score=self.confidence_score,
            recommendation=self.recommendation,
            reasoning=self.reasoning,
            metadata=self.metadata,
            assessed_at=self.assessed_at.isoformat(),
        )
alma/config/loader.py CHANGED
@@ -5,10 +5,10 @@ Handles loading configuration from files and environment variables,
5
5
  with support for Azure Key Vault secret resolution.
6
6
  """
7
7
 
8
- import os
9
8
  import logging
9
+ import os
10
10
  from pathlib import Path
11
- from typing import Dict, Any, Optional
11
+ from typing import Any, Dict
12
12
 
13
13
  import yaml
14
14
 
@@ -78,6 +78,7 @@ class ConfigLoader:
78
78
 
79
79
  # Handle ${VAR} patterns
80
80
  import re
81
+
81
82
  pattern = r"\$\{([^}]+)\}"
82
83
 
83
84
  def replace(match):
@@ -0,0 +1,23 @@
1
+ """
2
+ ALMA Consolidation Module.
3
+
4
+ Provides memory consolidation capabilities for deduplicating and merging
5
+ similar memories, inspired by Mem0's core innovation.
6
+ """
7
+
8
+ from alma.consolidation.engine import ConsolidationEngine, ConsolidationResult
9
+ from alma.consolidation.prompts import (
10
+ MERGE_ANTI_PATTERNS_PROMPT,
11
+ MERGE_DOMAIN_KNOWLEDGE_PROMPT,
12
+ MERGE_HEURISTICS_PROMPT,
13
+ MERGE_OUTCOMES_PROMPT,
14
+ )
15
+
16
+ __all__ = [
17
+ "ConsolidationEngine",
18
+ "ConsolidationResult",
19
+ "MERGE_HEURISTICS_PROMPT",
20
+ "MERGE_DOMAIN_KNOWLEDGE_PROMPT",
21
+ "MERGE_ANTI_PATTERNS_PROMPT",
22
+ "MERGE_OUTCOMES_PROMPT",
23
+ ]