alma-memory 0.5.1__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111) hide show
  1. alma/__init__.py +296 -226
  2. alma/compression/__init__.py +33 -0
  3. alma/compression/pipeline.py +980 -0
  4. alma/confidence/__init__.py +47 -47
  5. alma/confidence/engine.py +540 -540
  6. alma/confidence/types.py +351 -351
  7. alma/config/loader.py +157 -157
  8. alma/consolidation/__init__.py +23 -23
  9. alma/consolidation/engine.py +678 -678
  10. alma/consolidation/prompts.py +84 -84
  11. alma/core.py +1189 -430
  12. alma/domains/__init__.py +30 -30
  13. alma/domains/factory.py +359 -359
  14. alma/domains/schemas.py +448 -448
  15. alma/domains/types.py +272 -272
  16. alma/events/__init__.py +75 -75
  17. alma/events/emitter.py +285 -284
  18. alma/events/storage_mixin.py +246 -246
  19. alma/events/types.py +126 -126
  20. alma/events/webhook.py +425 -425
  21. alma/exceptions.py +49 -49
  22. alma/extraction/__init__.py +31 -31
  23. alma/extraction/auto_learner.py +265 -265
  24. alma/extraction/extractor.py +420 -420
  25. alma/graph/__init__.py +106 -106
  26. alma/graph/backends/__init__.py +32 -32
  27. alma/graph/backends/kuzu.py +624 -624
  28. alma/graph/backends/memgraph.py +432 -432
  29. alma/graph/backends/memory.py +236 -236
  30. alma/graph/backends/neo4j.py +417 -417
  31. alma/graph/base.py +159 -159
  32. alma/graph/extraction.py +198 -198
  33. alma/graph/store.py +860 -860
  34. alma/harness/__init__.py +35 -35
  35. alma/harness/base.py +386 -386
  36. alma/harness/domains.py +705 -705
  37. alma/initializer/__init__.py +37 -37
  38. alma/initializer/initializer.py +418 -418
  39. alma/initializer/types.py +250 -250
  40. alma/integration/__init__.py +62 -62
  41. alma/integration/claude_agents.py +444 -444
  42. alma/integration/helena.py +423 -423
  43. alma/integration/victor.py +471 -471
  44. alma/learning/__init__.py +101 -86
  45. alma/learning/decay.py +878 -0
  46. alma/learning/forgetting.py +1446 -1446
  47. alma/learning/heuristic_extractor.py +390 -390
  48. alma/learning/protocols.py +374 -374
  49. alma/learning/validation.py +346 -346
  50. alma/mcp/__init__.py +123 -45
  51. alma/mcp/__main__.py +156 -156
  52. alma/mcp/resources.py +122 -122
  53. alma/mcp/server.py +955 -591
  54. alma/mcp/tools.py +3254 -509
  55. alma/observability/__init__.py +91 -84
  56. alma/observability/config.py +302 -302
  57. alma/observability/guidelines.py +170 -0
  58. alma/observability/logging.py +424 -424
  59. alma/observability/metrics.py +583 -583
  60. alma/observability/tracing.py +440 -440
  61. alma/progress/__init__.py +21 -21
  62. alma/progress/tracker.py +607 -607
  63. alma/progress/types.py +250 -250
  64. alma/retrieval/__init__.py +134 -53
  65. alma/retrieval/budget.py +525 -0
  66. alma/retrieval/cache.py +1304 -1061
  67. alma/retrieval/embeddings.py +202 -202
  68. alma/retrieval/engine.py +850 -427
  69. alma/retrieval/modes.py +365 -0
  70. alma/retrieval/progressive.py +560 -0
  71. alma/retrieval/scoring.py +344 -344
  72. alma/retrieval/trust_scoring.py +637 -0
  73. alma/retrieval/verification.py +797 -0
  74. alma/session/__init__.py +19 -19
  75. alma/session/manager.py +442 -399
  76. alma/session/types.py +288 -288
  77. alma/storage/__init__.py +101 -90
  78. alma/storage/archive.py +233 -0
  79. alma/storage/azure_cosmos.py +1259 -1259
  80. alma/storage/base.py +1083 -583
  81. alma/storage/chroma.py +1443 -1443
  82. alma/storage/constants.py +103 -103
  83. alma/storage/file_based.py +614 -614
  84. alma/storage/migrations/__init__.py +21 -21
  85. alma/storage/migrations/base.py +321 -321
  86. alma/storage/migrations/runner.py +323 -323
  87. alma/storage/migrations/version_stores.py +337 -337
  88. alma/storage/migrations/versions/__init__.py +11 -11
  89. alma/storage/migrations/versions/v1_0_0.py +373 -373
  90. alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
  91. alma/storage/pinecone.py +1080 -1080
  92. alma/storage/postgresql.py +1948 -1559
  93. alma/storage/qdrant.py +1306 -1306
  94. alma/storage/sqlite_local.py +3041 -1457
  95. alma/testing/__init__.py +46 -46
  96. alma/testing/factories.py +301 -301
  97. alma/testing/mocks.py +389 -389
  98. alma/types.py +292 -264
  99. alma/utils/__init__.py +19 -0
  100. alma/utils/tokenizer.py +521 -0
  101. alma/workflow/__init__.py +83 -0
  102. alma/workflow/artifacts.py +170 -0
  103. alma/workflow/checkpoint.py +311 -0
  104. alma/workflow/context.py +228 -0
  105. alma/workflow/outcomes.py +189 -0
  106. alma/workflow/reducers.py +393 -0
  107. {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/METADATA +210 -72
  108. alma_memory-0.7.0.dist-info/RECORD +112 -0
  109. alma_memory-0.5.1.dist-info/RECORD +0 -93
  110. {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
  111. {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/confidence/types.py CHANGED
@@ -1,351 +1,351 @@
1
- """
2
- Confidence Types.
3
-
4
- Forward-looking confidence signals for strategies.
5
- Inspired by Ilya Sutskever's insight: emotions are forward-looking value functions.
6
- """
7
-
8
- import uuid
9
- from dataclasses import dataclass, field
10
- from datetime import datetime, timezone
11
- from typing import Any, Dict, List, Literal, Optional
12
-
13
-
14
@dataclass
class RiskSignal:
    """A single risk indicator for a strategy.

    Risk signals flag reasons a strategy may not work in the current
    context. Typical origins: similarity to past failures, untested
    contexts, high complexity, or missing prerequisites.
    """

    # Unique identifier for this signal.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Risk category, e.g. "similar_to_failure", "untested_context",
    # "high_complexity".
    signal_type: str = ""

    # Human-readable explanation of the risk.
    description: str = ""

    # How serious the risk is: 0.0 = low risk, 1.0 = critical risk.
    severity: float = 0.0

    # Origin of the signal, e.g. "heuristic:h123", "anti_pattern:ap456",
    # "context_analysis".
    source: str = ""

    # IDs of memories related to this signal.
    related_memories: List[str] = field(default_factory=list)

    # Free-form extra data, plus the detection timestamp (UTC).
    metadata: Dict[str, Any] = field(default_factory=dict)
    detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dictionary."""
        payload: Dict[str, Any] = dict(
            id=self.id,
            signal_type=self.signal_type,
            description=self.description,
            severity=self.severity,
            source=self.source,
            related_memories=self.related_memories,
            metadata=self.metadata,
        )
        # datetime is not JSON-serializable; emit ISO-8601 text instead.
        payload["detected_at"] = self.detected_at.isoformat()
        return payload
62
-
63
-
64
@dataclass
class OpportunitySignal:
    """A single opportunity indicator for a strategy.

    Opportunity signals flag reasons a strategy is likely to succeed.
    Typical origins: proven patterns with high success rates, high
    similarity to past successes, recent successful uses, or strong
    prerequisites being met.
    """

    # Unique identifier for this signal.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Opportunity category, e.g. "proven_pattern", "high_similarity",
    # "recent_success".
    signal_type: str = ""

    # Human-readable explanation of the opportunity.
    description: str = ""

    # How strong the signal is: 0.0 = weak, 1.0 = strong.
    strength: float = 0.0

    # Origin of the signal, e.g. "heuristic:h123", "outcome:o456",
    # "pattern_match".
    source: str = ""

    # IDs of memories related to this signal.
    related_memories: List[str] = field(default_factory=list)

    # Free-form extra data, plus the detection timestamp (UTC).
    metadata: Dict[str, Any] = field(default_factory=dict)
    detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dictionary."""
        payload: Dict[str, Any] = dict(
            id=self.id,
            signal_type=self.signal_type,
            description=self.description,
            strength=self.strength,
            source=self.source,
            related_memories=self.related_memories,
            metadata=self.metadata,
        )
        # datetime is not JSON-serializable; emit ISO-8601 text instead.
        payload["detected_at"] = self.detected_at.isoformat()
        return payload
110
-
111
-
112
# Recommendation levels
# Closed vocabulary for strategy recommendations, ordered from most
# favorable ("strong_yes") to least ("avoid"); used as the type of
# ConfidenceSignal.recommendation.
Recommendation = Literal["strong_yes", "yes", "neutral", "caution", "avoid"]
114
-
115
-
116
@dataclass
class ConfidenceSignal:
    """Forward-looking confidence assessment for a strategy.

    Blends backward-looking evidence (historical success rates) with
    forward-looking predictions (expected success in the current
    context). This is the "gut feeling" an agent consults before
    committing to a strategy.
    """

    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Subject of the assessment.
    strategy: str = ""
    context: str = ""
    agent: str = ""

    # Optional link to an existing heuristic.
    heuristic_id: Optional[str] = None

    # Backward-looking metrics, derived from historical outcomes.
    historical_success_rate: float = 0.0  # fraction of past successes (0-1)
    occurrence_count: int = 0  # number of recorded attempts

    # Forward-looking predictions, computed for the current context.
    predicted_success: float = 0.5  # expected success in THIS context
    uncertainty: float = 0.5  # 0 = certain, 1 = very uncertain
    context_similarity: float = 0.0  # similarity of current context to past successes

    # Risk side of the ledger.
    risk_signals: List[RiskSignal] = field(default_factory=list)
    total_risk_score: float = 0.0  # aggregated risk (0 = none, 1 = high)

    # Opportunity side of the ledger.
    opportunity_signals: List[OpportunitySignal] = field(default_factory=list)
    total_opportunity_score: float = 0.0  # aggregated opportunity (0 = none, 1 = high)

    # Combined verdict.
    confidence_score: float = 0.5  # final weighted score (0-1)
    recommendation: Recommendation = "neutral"

    # Human-readable justification for the verdict.
    reasoning: str = ""

    # Free-form extra data, plus the assessment timestamp (UTC).
    metadata: Dict[str, Any] = field(default_factory=dict)
    assessed_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    @classmethod
    def create(
        cls,
        strategy: str,
        context: str,
        agent: str,
        heuristic_id: Optional[str] = None,
    ) -> "ConfidenceSignal":
        """Build a fresh signal for *strategy* in *context* for *agent*."""
        return cls(
            strategy=strategy,
            context=context,
            agent=agent,
            heuristic_id=heuristic_id,
        )

    def add_risk(
        self,
        signal_type: str,
        description: str,
        severity: float,
        source: str = "",
    ) -> RiskSignal:
        """Record a risk signal and refresh the aggregate scores."""
        signal = RiskSignal(
            signal_type=signal_type,
            description=description,
            severity=severity,
            source=source,
        )
        self.risk_signals.append(signal)
        self._recalculate_scores()
        return signal

    def add_opportunity(
        self,
        signal_type: str,
        description: str,
        strength: float,
        source: str = "",
    ) -> OpportunitySignal:
        """Record an opportunity signal and refresh the aggregate scores."""
        signal = OpportunitySignal(
            signal_type=signal_type,
            description=description,
            strength=strength,
            source=source,
        )
        self.opportunity_signals.append(signal)
        self._recalculate_scores()
        return signal

    def _recalculate_scores(self) -> None:
        """Refresh aggregate scores and the recommendation."""
        # The dominant (max) signal drives each aggregate; empty -> 0.0.
        self.total_risk_score = max(
            (s.severity for s in self.risk_signals), default=0.0
        )
        self.total_opportunity_score = max(
            (s.strength for s in self.opportunity_signals), default=0.0
        )

        # Weighted blend of historical evidence and forward predictions.
        blended = (
            0.3 * self.historical_success_rate
            + 0.4 * self.predicted_success
            + 0.15 * self.context_similarity
            + 0.15 * (1.0 - self.uncertainty)
        )

        # Risk pulls the score down, opportunity pushes it up; clamp to [0, 1].
        adjusted = blended + -0.2 * self.total_risk_score + 0.2 * self.total_opportunity_score
        self.confidence_score = max(0.0, min(1.0, adjusted))

        self._update_recommendation()

    def _update_recommendation(self) -> None:
        """Derive the recommendation from risk and confidence levels."""
        # Severe risk overrides whatever the confidence score says.
        if self.total_risk_score >= 0.8:
            self.recommendation = "avoid"
            return
        if self.total_risk_score >= 0.6:
            self.recommendation = "caution"
            return
        # Otherwise map the confidence score onto the recommendation ladder.
        for threshold, label in (
            (0.8, "strong_yes"),
            (0.6, "yes"),
            (0.4, "neutral"),
            (0.2, "caution"),
        ):
            if self.confidence_score >= threshold:
                self.recommendation = label
                return
        self.recommendation = "avoid"

    def to_prompt(self) -> str:
        """Render the assessment as markdown for prompt injection."""
        out = [
            f"## Confidence Assessment: {self.strategy[:50]}...",
            f"**Recommendation: {self.recommendation.upper()}** (score: {self.confidence_score:.2f})",
            "",
            "### Metrics",
            f"- Historical success: {self.historical_success_rate:.0%} ({self.occurrence_count} uses)",
            f"- Predicted success: {self.predicted_success:.0%}",
            f"- Context similarity: {self.context_similarity:.0%}",
            f"- Uncertainty: {self.uncertainty:.0%}",
            "",
        ]

        if self.risk_signals:
            out.append("### Risks")
            for signal in self.risk_signals:
                if signal.severity >= 0.7:
                    label = "HIGH"
                elif signal.severity >= 0.4:
                    label = "MEDIUM"
                else:
                    label = "LOW"
                out.append(f"- [{label}] {signal.description}")
            out.append("")

        if self.opportunity_signals:
            out.append("### Opportunities")
            for signal in self.opportunity_signals:
                if signal.strength >= 0.7:
                    label = "STRONG"
                elif signal.strength >= 0.4:
                    label = "MODERATE"
                else:
                    label = "WEAK"
                out.append(f"- [{label}] {signal.description}")
            out.append("")

        if self.reasoning:
            out.append("### Analysis")
            out.append(self.reasoning)

        return "\n".join(out)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dictionary (nested signals included)."""
        return {
            "id": self.id,
            "strategy": self.strategy,
            "context": self.context,
            "agent": self.agent,
            "heuristic_id": self.heuristic_id,
            "historical_success_rate": self.historical_success_rate,
            "occurrence_count": self.occurrence_count,
            "predicted_success": self.predicted_success,
            "uncertainty": self.uncertainty,
            "context_similarity": self.context_similarity,
            "risk_signals": [s.to_dict() for s in self.risk_signals],
            "total_risk_score": self.total_risk_score,
            "opportunity_signals": [s.to_dict() for s in self.opportunity_signals],
            "total_opportunity_score": self.total_opportunity_score,
            "confidence_score": self.confidence_score,
            "recommendation": self.recommendation,
            "reasoning": self.reasoning,
            "metadata": self.metadata,
            "assessed_at": self.assessed_at.isoformat(),
        }
1
+ """
2
+ Confidence Types.
3
+
4
+ Forward-looking confidence signals for strategies.
5
+ Inspired by Ilya Sutskever's insight: emotions are forward-looking value functions.
6
+ """
7
+
8
+ import uuid
9
+ from dataclasses import dataclass, field
10
+ from datetime import datetime, timezone
11
+ from typing import Any, Dict, List, Literal, Optional
12
+
13
+
14
@dataclass
class RiskSignal:
    """A single risk indicator for a strategy.

    Risk signals flag reasons a strategy may not work in the current
    context. Typical origins: similarity to past failures, untested
    contexts, high complexity, or missing prerequisites.
    """

    # Unique identifier for this signal.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Risk category, e.g. "similar_to_failure", "untested_context",
    # "high_complexity".
    signal_type: str = ""

    # Human-readable explanation of the risk.
    description: str = ""

    # How serious the risk is: 0.0 = low risk, 1.0 = critical risk.
    severity: float = 0.0

    # Origin of the signal, e.g. "heuristic:h123", "anti_pattern:ap456",
    # "context_analysis".
    source: str = ""

    # IDs of memories related to this signal.
    related_memories: List[str] = field(default_factory=list)

    # Free-form extra data, plus the detection timestamp (UTC).
    metadata: Dict[str, Any] = field(default_factory=dict)
    detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dictionary."""
        payload: Dict[str, Any] = dict(
            id=self.id,
            signal_type=self.signal_type,
            description=self.description,
            severity=self.severity,
            source=self.source,
            related_memories=self.related_memories,
            metadata=self.metadata,
        )
        # datetime is not JSON-serializable; emit ISO-8601 text instead.
        payload["detected_at"] = self.detected_at.isoformat()
        return payload
62
+
63
+
64
@dataclass
class OpportunitySignal:
    """A single opportunity indicator for a strategy.

    Opportunity signals flag reasons a strategy is likely to succeed.
    Typical origins: proven patterns with high success rates, high
    similarity to past successes, recent successful uses, or strong
    prerequisites being met.
    """

    # Unique identifier for this signal.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Opportunity category, e.g. "proven_pattern", "high_similarity",
    # "recent_success".
    signal_type: str = ""

    # Human-readable explanation of the opportunity.
    description: str = ""

    # How strong the signal is: 0.0 = weak, 1.0 = strong.
    strength: float = 0.0

    # Origin of the signal, e.g. "heuristic:h123", "outcome:o456",
    # "pattern_match".
    source: str = ""

    # IDs of memories related to this signal.
    related_memories: List[str] = field(default_factory=list)

    # Free-form extra data, plus the detection timestamp (UTC).
    metadata: Dict[str, Any] = field(default_factory=dict)
    detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dictionary."""
        payload: Dict[str, Any] = dict(
            id=self.id,
            signal_type=self.signal_type,
            description=self.description,
            strength=self.strength,
            source=self.source,
            related_memories=self.related_memories,
            metadata=self.metadata,
        )
        # datetime is not JSON-serializable; emit ISO-8601 text instead.
        payload["detected_at"] = self.detected_at.isoformat()
        return payload
110
+
111
+
112
# Recommendation levels
# Closed vocabulary for strategy recommendations, ordered from most
# favorable ("strong_yes") to least ("avoid"); used as the type of
# ConfidenceSignal.recommendation.
Recommendation = Literal["strong_yes", "yes", "neutral", "caution", "avoid"]
114
+
115
+
116
@dataclass
class ConfidenceSignal:
    """Forward-looking confidence assessment for a strategy.

    Blends backward-looking evidence (historical success rates) with
    forward-looking predictions (expected success in the current
    context). This is the "gut feeling" an agent consults before
    committing to a strategy.
    """

    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    # Subject of the assessment.
    strategy: str = ""
    context: str = ""
    agent: str = ""

    # Optional link to an existing heuristic.
    heuristic_id: Optional[str] = None

    # Backward-looking metrics, derived from historical outcomes.
    historical_success_rate: float = 0.0  # fraction of past successes (0-1)
    occurrence_count: int = 0  # number of recorded attempts

    # Forward-looking predictions, computed for the current context.
    predicted_success: float = 0.5  # expected success in THIS context
    uncertainty: float = 0.5  # 0 = certain, 1 = very uncertain
    context_similarity: float = 0.0  # similarity of current context to past successes

    # Risk side of the ledger.
    risk_signals: List[RiskSignal] = field(default_factory=list)
    total_risk_score: float = 0.0  # aggregated risk (0 = none, 1 = high)

    # Opportunity side of the ledger.
    opportunity_signals: List[OpportunitySignal] = field(default_factory=list)
    total_opportunity_score: float = 0.0  # aggregated opportunity (0 = none, 1 = high)

    # Combined verdict.
    confidence_score: float = 0.5  # final weighted score (0-1)
    recommendation: Recommendation = "neutral"

    # Human-readable justification for the verdict.
    reasoning: str = ""

    # Free-form extra data, plus the assessment timestamp (UTC).
    metadata: Dict[str, Any] = field(default_factory=dict)
    assessed_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    @classmethod
    def create(
        cls,
        strategy: str,
        context: str,
        agent: str,
        heuristic_id: Optional[str] = None,
    ) -> "ConfidenceSignal":
        """Build a fresh signal for *strategy* in *context* for *agent*."""
        return cls(
            strategy=strategy,
            context=context,
            agent=agent,
            heuristic_id=heuristic_id,
        )

    def add_risk(
        self,
        signal_type: str,
        description: str,
        severity: float,
        source: str = "",
    ) -> RiskSignal:
        """Record a risk signal and refresh the aggregate scores.

        Returns the newly created :class:`RiskSignal`.
        """
        signal = RiskSignal(
            signal_type=signal_type,
            description=description,
            severity=severity,
            source=source,
        )
        self.risk_signals.append(signal)
        self._recalculate_scores()
        return signal

    def add_opportunity(
        self,
        signal_type: str,
        description: str,
        strength: float,
        source: str = "",
    ) -> OpportunitySignal:
        """Record an opportunity signal and refresh the aggregate scores.

        Returns the newly created :class:`OpportunitySignal`.
        """
        signal = OpportunitySignal(
            signal_type=signal_type,
            description=description,
            strength=strength,
            source=source,
        )
        self.opportunity_signals.append(signal)
        self._recalculate_scores()
        return signal

    def _recalculate_scores(self) -> None:
        """Refresh aggregate scores and the recommendation."""
        # The dominant (max) signal drives each aggregate; empty -> 0.0.
        self.total_risk_score = max(
            (s.severity for s in self.risk_signals), default=0.0
        )
        self.total_opportunity_score = max(
            (s.strength for s in self.opportunity_signals), default=0.0
        )

        # Weighted blend of historical evidence and forward predictions.
        blended = (
            0.3 * self.historical_success_rate
            + 0.4 * self.predicted_success
            + 0.15 * self.context_similarity
            + 0.15 * (1.0 - self.uncertainty)
        )

        # Risk pulls the score down, opportunity pushes it up; clamp to [0, 1].
        adjusted = blended + -0.2 * self.total_risk_score + 0.2 * self.total_opportunity_score
        self.confidence_score = max(0.0, min(1.0, adjusted))

        self._update_recommendation()

    def _update_recommendation(self) -> None:
        """Derive the recommendation from risk and confidence levels."""
        # Severe risk overrides whatever the confidence score says.
        if self.total_risk_score >= 0.8:
            self.recommendation = "avoid"
            return
        if self.total_risk_score >= 0.6:
            self.recommendation = "caution"
            return
        # Otherwise map the confidence score onto the recommendation ladder.
        for threshold, label in (
            (0.8, "strong_yes"),
            (0.6, "yes"),
            (0.4, "neutral"),
            (0.2, "caution"),
        ):
            if self.confidence_score >= threshold:
                self.recommendation = label
                return
        self.recommendation = "avoid"

    def to_prompt(self) -> str:
        """Render the assessment as markdown for prompt injection."""
        # Fix: only append an ellipsis when the strategy text was actually
        # truncated; previously "..." was added unconditionally, making
        # short strategy names look cut off.
        title = self.strategy[:50] + ("..." if len(self.strategy) > 50 else "")
        out = [
            f"## Confidence Assessment: {title}",
            f"**Recommendation: {self.recommendation.upper()}** (score: {self.confidence_score:.2f})",
            "",
            "### Metrics",
            f"- Historical success: {self.historical_success_rate:.0%} ({self.occurrence_count} uses)",
            f"- Predicted success: {self.predicted_success:.0%}",
            f"- Context similarity: {self.context_similarity:.0%}",
            f"- Uncertainty: {self.uncertainty:.0%}",
            "",
        ]

        if self.risk_signals:
            out.append("### Risks")
            for signal in self.risk_signals:
                if signal.severity >= 0.7:
                    label = "HIGH"
                elif signal.severity >= 0.4:
                    label = "MEDIUM"
                else:
                    label = "LOW"
                out.append(f"- [{label}] {signal.description}")
            out.append("")

        if self.opportunity_signals:
            out.append("### Opportunities")
            for signal in self.opportunity_signals:
                if signal.strength >= 0.7:
                    label = "STRONG"
                elif signal.strength >= 0.4:
                    label = "MODERATE"
                else:
                    label = "WEAK"
                out.append(f"- [{label}] {signal.description}")
            out.append("")

        if self.reasoning:
            out.append("### Analysis")
            out.append(self.reasoning)

        return "\n".join(out)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dictionary (nested signals included)."""
        return {
            "id": self.id,
            "strategy": self.strategy,
            "context": self.context,
            "agent": self.agent,
            "heuristic_id": self.heuristic_id,
            "historical_success_rate": self.historical_success_rate,
            "occurrence_count": self.occurrence_count,
            "predicted_success": self.predicted_success,
            "uncertainty": self.uncertainty,
            "context_similarity": self.context_similarity,
            "risk_signals": [s.to_dict() for s in self.risk_signals],
            "total_risk_score": self.total_risk_score,
            "opportunity_signals": [s.to_dict() for s in self.opportunity_signals],
            "total_opportunity_score": self.total_opportunity_score,
            "confidence_score": self.confidence_score,
            "recommendation": self.recommendation,
            "reasoning": self.reasoning,
            "metadata": self.metadata,
            "assessed_at": self.assessed_at.isoformat(),
        }