alma-memory 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,331 @@
+ """
+ Confidence Types.
+
+ Forward-looking confidence signals for strategies.
+ Inspired by Ilya Sutskever's insight: emotions are forward-looking value functions.
+ """
+
+ from dataclasses import dataclass, field
+ from datetime import datetime, timezone
+ from typing import Any, Dict, List, Literal, Optional
+ import uuid
+
+
+ @dataclass
+ class RiskSignal:
+     """
+     A risk indicator for a strategy.
+
+     Risks are signals that a strategy may not work in the current context.
+     They can come from:
+     - Similar past failures
+     - Untested contexts
+     - High complexity
+     - Missing prerequisites
+     """
+
+     id: str = field(default_factory=lambda: str(uuid.uuid4()))
+
+     # Type of risk
+     signal_type: str = ""  # "similar_to_failure", "untested_context", "high_complexity", etc.
+
+     # Human-readable description
+     description: str = ""
+
+     # Severity: 0.0 = low risk, 1.0 = critical risk
+     severity: float = 0.0
+
+     # What triggered this signal
+     source: str = ""  # "heuristic:h123", "anti_pattern:ap456", "context_analysis"
+
+     # Related memory IDs
+     related_memories: List[str] = field(default_factory=list)
+
+     # Metadata
+     metadata: Dict[str, Any] = field(default_factory=dict)
+     detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Serialize to dictionary."""
+         return {
+             "id": self.id,
+             "signal_type": self.signal_type,
+             "description": self.description,
+             "severity": self.severity,
+             "source": self.source,
+             "related_memories": self.related_memories,
+             "metadata": self.metadata,
+             "detected_at": self.detected_at.isoformat(),
+         }
+
+
+ @dataclass
+ class OpportunitySignal:
+     """
+     An opportunity indicator for a strategy.
+
+     Opportunities are signals that a strategy is likely to succeed.
+     They can come from:
+     - Proven patterns with high success rate
+     - High similarity to past successes
+     - Recent successful uses
+     - Strong prerequisites met
+     """
+
+     id: str = field(default_factory=lambda: str(uuid.uuid4()))
+
+     # Type of opportunity
+     signal_type: str = ""  # "proven_pattern", "high_similarity", "recent_success", etc.
+
+     # Human-readable description
+     description: str = ""
+
+     # Strength: 0.0 = weak signal, 1.0 = strong signal
+     strength: float = 0.0
+
+     # What triggered this signal
+     source: str = ""  # "heuristic:h123", "outcome:o456", "pattern_match"
+
+     # Related memory IDs
+     related_memories: List[str] = field(default_factory=list)
+
+     # Metadata
+     metadata: Dict[str, Any] = field(default_factory=dict)
+     detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Serialize to dictionary."""
+         return {
+             "id": self.id,
+             "signal_type": self.signal_type,
+             "description": self.description,
+             "strength": self.strength,
+             "source": self.source,
+             "related_memories": self.related_memories,
+             "metadata": self.metadata,
+             "detected_at": self.detected_at.isoformat(),
+         }
+
+
+ # Recommendation levels
+ Recommendation = Literal["strong_yes", "yes", "neutral", "caution", "avoid"]
+
+
+ @dataclass
+ class ConfidenceSignal:
+     """
+     Forward-looking confidence assessment for a strategy.
+
+     Combines backward-looking metrics (historical success) with
+     forward-looking predictions (expected success in current context).
+
+     This is the "gut feeling" that tells an agent whether a strategy
+     is likely to work before trying it.
+     """
+
+     id: str = field(default_factory=lambda: str(uuid.uuid4()))
+
+     # What we're assessing
+     strategy: str = ""
+     context: str = ""
+     agent: str = ""
+
+     # Optional link to existing heuristic
+     heuristic_id: Optional[str] = None
+
+     # Backward-looking metrics (from historical data)
+     historical_success_rate: float = 0.0  # 0-1, based on past outcomes
+     occurrence_count: int = 0  # How many times this strategy was tried
+
+     # Forward-looking predictions (computed for current context)
+     predicted_success: float = 0.5  # Expected success in THIS context
+     uncertainty: float = 0.5  # How uncertain is the prediction (0=certain, 1=very uncertain)
+     context_similarity: float = 0.0  # How similar is current context to past successes
+
+     # Risk signals
+     risk_signals: List[RiskSignal] = field(default_factory=list)
+     total_risk_score: float = 0.0  # Aggregated risk (0=no risk, 1=high risk)
+
+     # Opportunity signals
+     opportunity_signals: List[OpportunitySignal] = field(default_factory=list)
+     total_opportunity_score: float = 0.0  # Aggregated opportunity (0=none, 1=high)
+
+     # Combined assessment
+     confidence_score: float = 0.5  # Final weighted score (0-1)
+     recommendation: Recommendation = "neutral"
+
+     # Explanation
+     reasoning: str = ""
+
+     # Metadata
+     metadata: Dict[str, Any] = field(default_factory=dict)
+     assessed_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+     @classmethod
+     def create(
+         cls,
+         strategy: str,
+         context: str,
+         agent: str,
+         heuristic_id: Optional[str] = None,
+     ) -> "ConfidenceSignal":
+         """Create a new confidence signal."""
+         return cls(
+             strategy=strategy,
+             context=context,
+             agent=agent,
+             heuristic_id=heuristic_id,
+         )
+
+     def add_risk(
+         self,
+         signal_type: str,
+         description: str,
+         severity: float,
+         source: str = "",
+     ) -> RiskSignal:
+         """Add a risk signal."""
+         risk = RiskSignal(
+             signal_type=signal_type,
+             description=description,
+             severity=severity,
+             source=source,
+         )
+         self.risk_signals.append(risk)
+         self._recalculate_scores()
+         return risk
+
+     def add_opportunity(
+         self,
+         signal_type: str,
+         description: str,
+         strength: float,
+         source: str = "",
+     ) -> OpportunitySignal:
+         """Add an opportunity signal."""
+         opportunity = OpportunitySignal(
+             signal_type=signal_type,
+             description=description,
+             strength=strength,
+             source=source,
+         )
+         self.opportunity_signals.append(opportunity)
+         self._recalculate_scores()
+         return opportunity
+
+     def _recalculate_scores(self) -> None:
+         """Recalculate total scores and recommendation."""
+         # Aggregate risk
+         if self.risk_signals:
+             # Use max risk as the dominant signal
+             self.total_risk_score = max(r.severity for r in self.risk_signals)
+         else:
+             self.total_risk_score = 0.0
+
+         # Aggregate opportunity
+         if self.opportunity_signals:
+             # Use max opportunity as the dominant signal
+             self.total_opportunity_score = max(o.strength for o in self.opportunity_signals)
+         else:
+             self.total_opportunity_score = 0.0
+
+         # Combined confidence score
+         # Weighs historical success, predicted success, and risk/opportunity balance
+         base_confidence = (
+             0.3 * self.historical_success_rate +
+             0.4 * self.predicted_success +
+             0.15 * self.context_similarity +
+             0.15 * (1.0 - self.uncertainty)
+         )
+
+         # Adjust for risk/opportunity
+         risk_adjustment = -0.2 * self.total_risk_score
+         opportunity_adjustment = 0.2 * self.total_opportunity_score
+
+         self.confidence_score = max(0.0, min(1.0,
+             base_confidence + risk_adjustment + opportunity_adjustment
+         ))
+
+         # Determine recommendation
+         self._update_recommendation()
+
+     def _update_recommendation(self) -> None:
+         """Update recommendation based on confidence score and signals."""
+         # High risk signals can override confidence
+         if self.total_risk_score >= 0.8:
+             self.recommendation = "avoid"
+         elif self.total_risk_score >= 0.6:
+             self.recommendation = "caution"
+         elif self.confidence_score >= 0.8:
+             self.recommendation = "strong_yes"
+         elif self.confidence_score >= 0.6:
+             self.recommendation = "yes"
+         elif self.confidence_score >= 0.4:
+             self.recommendation = "neutral"
+         elif self.confidence_score >= 0.2:
+             self.recommendation = "caution"
+         else:
+             self.recommendation = "avoid"
+
+     def to_prompt(self) -> str:
+         """Format confidence signal for prompt injection."""
+         lines = [
+             f"## Confidence Assessment: {self.strategy[:50]}...",
+             f"**Recommendation: {self.recommendation.upper()}** (score: {self.confidence_score:.2f})",
+             "",
+         ]
+
+         # Metrics
+         lines.append("### Metrics")
+         lines.append(f"- Historical success: {self.historical_success_rate:.0%} ({self.occurrence_count} uses)")
+         lines.append(f"- Predicted success: {self.predicted_success:.0%}")
+         lines.append(f"- Context similarity: {self.context_similarity:.0%}")
+         lines.append(f"- Uncertainty: {self.uncertainty:.0%}")
+         lines.append("")
+
+         # Risks
+         if self.risk_signals:
+             lines.append("### Risks")
+             for risk in self.risk_signals:
+                 severity_label = "HIGH" if risk.severity >= 0.7 else "MEDIUM" if risk.severity >= 0.4 else "LOW"
+                 lines.append(f"- [{severity_label}] {risk.description}")
+             lines.append("")
+
+         # Opportunities
+         if self.opportunity_signals:
+             lines.append("### Opportunities")
+             for opp in self.opportunity_signals:
+                 strength_label = "STRONG" if opp.strength >= 0.7 else "MODERATE" if opp.strength >= 0.4 else "WEAK"
+                 lines.append(f"- [{strength_label}] {opp.description}")
+             lines.append("")
+
+         # Reasoning
+         if self.reasoning:
+             lines.append("### Analysis")
+             lines.append(self.reasoning)
+
+         return "\n".join(lines)
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Serialize to dictionary."""
+         return {
+             "id": self.id,
+             "strategy": self.strategy,
+             "context": self.context,
+             "agent": self.agent,
+             "heuristic_id": self.heuristic_id,
+             "historical_success_rate": self.historical_success_rate,
+             "occurrence_count": self.occurrence_count,
+             "predicted_success": self.predicted_success,
+             "uncertainty": self.uncertainty,
+             "context_similarity": self.context_similarity,
+             "risk_signals": [r.to_dict() for r in self.risk_signals],
+             "total_risk_score": self.total_risk_score,
+             "opportunity_signals": [o.to_dict() for o in self.opportunity_signals],
+             "total_opportunity_score": self.total_opportunity_score,
+             "confidence_score": self.confidence_score,
+             "recommendation": self.recommendation,
+             "reasoning": self.reasoning,
+             "metadata": self.metadata,
+             "assessed_at": self.assessed_at.isoformat(),
+         }
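
The file above gives ConfidenceSignal a small, self-contained assessment API: create an instance, attach risk and opportunity signals, and read the recalculated score and recommendation. The sketch below illustrates that flow using only calls visible in this diff; the import path (the diff does not name the file), the strategy/context strings, and the numeric values are illustrative assumptions, not taken from the package.

    from alma.confidence.types import ConfidenceSignal  # module path assumed; not shown in the diff

    signal = ConfidenceSignal.create(
        strategy="Retry failed API calls with exponential backoff",
        context="Flaky third-party payment gateway",
        agent="ops-agent",
    )

    # Backward- and forward-looking metrics would normally be filled in from
    # stored outcomes; they are hard-coded here purely for illustration.
    signal.historical_success_rate = 0.7
    signal.occurrence_count = 12
    signal.predicted_success = 0.65
    signal.context_similarity = 0.5
    signal.uncertainty = 0.3

    # Each add_* call re-runs _recalculate_scores(), so the score and
    # recommendation stay current as signals accumulate.
    signal.add_risk(
        signal_type="untested_context",
        description="Gateway version not seen in past outcomes",
        severity=0.4,
    )
    signal.add_opportunity(
        signal_type="proven_pattern",
        description="Backoff strategy succeeded in 9 of the last 12 uses",
        strength=0.8,
    )

    print(signal.confidence_score, signal.recommendation)
    print(signal.to_prompt())  # markdown-style block ready for prompt injection

Per _recalculate_scores, the final score weighs historical success at 0.3, predicted success at 0.4, context similarity at 0.15, and certainty (1 - uncertainty) at 0.15, then shifts the result by up to -0.2 for the strongest risk and +0.2 for the strongest opportunity before clamping to [0, 1].
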
@@ -0,0 +1,30 @@
+ """
+ ALMA Domain Memory Module.
+
+ Provides domain-agnostic memory schemas and factory pattern
+ for creating domain-specific ALMA instances.
+ """
+
+ from alma.domains.types import (
+     DomainSchema,
+     EntityType,
+     RelationshipType,
+ )
+ from alma.domains.factory import DomainMemoryFactory
+ from alma.domains.schemas import (
+     get_coding_schema,
+     get_research_schema,
+     get_sales_schema,
+     get_general_schema,
+ )
+
+ __all__ = [
+     "DomainSchema",
+     "EntityType",
+     "RelationshipType",
+     "DomainMemoryFactory",
+     "get_coding_schema",
+     "get_research_schema",
+     "get_sales_schema",
+     "get_general_schema",
+ ]
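
This second file is the public surface of the new domains package: the schema types, a DomainMemoryFactory, and four prebuilt schema helpers. A minimal sketch of pulling in one of those schemas follows; it assumes the get_*_schema helpers take no arguments and return DomainSchema instances, since only the re-exports (not the function bodies) appear in this diff, and it leaves the factory unused because its methods are not shown here.

    from alma.domains import DomainSchema, get_coding_schema

    # Assumed to be a zero-argument helper returning a DomainSchema; the diff
    # shows only the re-export, not the implementation.
    schema = get_coding_schema()
    assert isinstance(schema, DomainSchema)
    print(schema)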