alma-memory 0.3.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. alma/__init__.py +99 -29
  2. alma/confidence/__init__.py +47 -0
  3. alma/confidence/engine.py +540 -0
  4. alma/confidence/types.py +351 -0
  5. alma/config/loader.py +3 -2
  6. alma/consolidation/__init__.py +23 -0
  7. alma/consolidation/engine.py +678 -0
  8. alma/consolidation/prompts.py +84 -0
  9. alma/core.py +15 -15
  10. alma/domains/__init__.py +6 -6
  11. alma/domains/factory.py +12 -9
  12. alma/domains/schemas.py +17 -3
  13. alma/domains/types.py +8 -4
  14. alma/events/__init__.py +75 -0
  15. alma/events/emitter.py +284 -0
  16. alma/events/storage_mixin.py +246 -0
  17. alma/events/types.py +126 -0
  18. alma/events/webhook.py +425 -0
  19. alma/exceptions.py +49 -0
  20. alma/extraction/__init__.py +31 -0
  21. alma/extraction/auto_learner.py +264 -0
  22. alma/extraction/extractor.py +420 -0
  23. alma/graph/__init__.py +81 -0
  24. alma/graph/backends/__init__.py +18 -0
  25. alma/graph/backends/memory.py +236 -0
  26. alma/graph/backends/neo4j.py +417 -0
  27. alma/graph/base.py +159 -0
  28. alma/graph/extraction.py +198 -0
  29. alma/graph/store.py +860 -0
  30. alma/harness/__init__.py +4 -4
  31. alma/harness/base.py +18 -9
  32. alma/harness/domains.py +27 -11
  33. alma/initializer/__init__.py +37 -0
  34. alma/initializer/initializer.py +418 -0
  35. alma/initializer/types.py +250 -0
  36. alma/integration/__init__.py +9 -9
  37. alma/integration/claude_agents.py +10 -10
  38. alma/integration/helena.py +32 -22
  39. alma/integration/victor.py +57 -33
  40. alma/learning/__init__.py +27 -27
  41. alma/learning/forgetting.py +198 -148
  42. alma/learning/heuristic_extractor.py +40 -24
  43. alma/learning/protocols.py +62 -14
  44. alma/learning/validation.py +7 -2
  45. alma/mcp/__init__.py +4 -4
  46. alma/mcp/__main__.py +2 -1
  47. alma/mcp/resources.py +17 -16
  48. alma/mcp/server.py +102 -44
  49. alma/mcp/tools.py +174 -37
  50. alma/progress/__init__.py +3 -3
  51. alma/progress/tracker.py +26 -20
  52. alma/progress/types.py +8 -12
  53. alma/py.typed +0 -0
  54. alma/retrieval/__init__.py +11 -11
  55. alma/retrieval/cache.py +20 -21
  56. alma/retrieval/embeddings.py +4 -4
  57. alma/retrieval/engine.py +114 -35
  58. alma/retrieval/scoring.py +73 -63
  59. alma/session/__init__.py +2 -2
  60. alma/session/manager.py +5 -5
  61. alma/session/types.py +5 -4
  62. alma/storage/__init__.py +41 -0
  63. alma/storage/azure_cosmos.py +107 -31
  64. alma/storage/base.py +157 -4
  65. alma/storage/chroma.py +1443 -0
  66. alma/storage/file_based.py +56 -20
  67. alma/storage/pinecone.py +1080 -0
  68. alma/storage/postgresql.py +1452 -0
  69. alma/storage/qdrant.py +1306 -0
  70. alma/storage/sqlite_local.py +376 -31
  71. alma/types.py +62 -14
  72. alma_memory-0.5.0.dist-info/METADATA +905 -0
  73. alma_memory-0.5.0.dist-info/RECORD +76 -0
  74. {alma_memory-0.3.0.dist-info → alma_memory-0.5.0.dist-info}/WHEEL +1 -1
  75. alma_memory-0.3.0.dist-info/METADATA +0 -438
  76. alma_memory-0.3.0.dist-info/RECORD +0 -46
  77. {alma_memory-0.3.0.dist-info → alma_memory-0.5.0.dist-info}/top_level.txt +0 -0
alma/confidence/engine.py (new file in 0.5.0)
@@ -0,0 +1,540 @@
+"""
+Confidence Engine.
+
+Computes forward-looking confidence for strategies.
+Not just "this worked before" but "this will likely work now."
+"""
+
+import logging
+from typing import Any, List, Optional, Tuple
+
+from alma.confidence.types import (
+    ConfidenceSignal,
+    OpportunitySignal,
+    RiskSignal,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class ConfidenceEngine:
+    """
+    Compute forward-looking confidence for strategies.
+
+    The engine combines:
+    1. Historical data (past outcomes, heuristics)
+    2. Context analysis (similarity to past successes/failures)
+    3. Risk detection (anti-patterns, untested contexts)
+    4. Opportunity detection (proven patterns, recent successes)
+
+    Usage:
+        engine = ConfidenceEngine(alma)
+
+        # Assess a single strategy
+        signal = engine.assess_strategy(
+            strategy="Use incremental validation for forms",
+            context="Testing a 5-field registration form",
+            agent="Helena",
+        )
+        print(f"Confidence: {signal.confidence_score:.0%}")
+        print(f"Recommendation: {signal.recommendation}")
+
+        # Rank multiple strategies
+        rankings = engine.rank_strategies(
+            strategies=["Strategy A", "Strategy B", "Strategy C"],
+            context="Current task context",
+            agent="Helena",
+        )
+        for strategy, signal in rankings:
+            print(f"{strategy}: {signal.recommendation}")
+    """
+
+    def __init__(
+        self,
+        alma: Optional[Any] = None,
+        similarity_threshold: float = 0.7,
+        min_occurrences_for_confidence: int = 3,
+    ):
+        """
+        Initialize the ConfidenceEngine.
+
+        Args:
+            alma: ALMA instance for memory access
+            similarity_threshold: Minimum similarity for "high similarity" signals
+            min_occurrences_for_confidence: Minimum uses before trusting historical rate
+        """
+        self.alma = alma
+        self.similarity_threshold = similarity_threshold
+        self.min_occurrences_for_confidence = min_occurrences_for_confidence
+
+    def assess_strategy(
+        self,
+        strategy: str,
+        context: str,
+        agent: str,
+        heuristic: Optional[Any] = None,
+    ) -> ConfidenceSignal:
+        """
+        Assess confidence for a strategy in the current context.
+
+        Args:
+            strategy: The strategy to assess
+            context: Current task/context description
+            agent: Agent name
+            heuristic: Optional existing heuristic for this strategy
+
+        Returns:
+            ConfidenceSignal with full assessment
+        """
+        signal = ConfidenceSignal.create(
+            strategy=strategy,
+            context=context,
+            agent=agent,
+            heuristic_id=heuristic.id if heuristic else None,
+        )
+
+        # 1. Load historical data from heuristic
+        if heuristic:
+            signal.occurrence_count = getattr(heuristic, "occurrence_count", 0)
+            success_count = getattr(heuristic, "success_count", 0)
+            if signal.occurrence_count > 0:
+                signal.historical_success_rate = success_count / signal.occurrence_count
+            signal.metadata["heuristic_confidence"] = getattr(
+                heuristic, "confidence", 0.5
+            )
+
+        # 2. Analyze context similarity
+        signal.context_similarity = self._compute_context_similarity(
+            strategy=strategy,
+            context=context,
+            agent=agent,
+        )
+
+        # 3. Compute predicted success
+        signal.predicted_success = self._predict_success(
+            strategy=strategy,
+            context=context,
+            agent=agent,
+            historical_rate=signal.historical_success_rate,
+            context_similarity=signal.context_similarity,
+        )
+
+        # 4. Compute uncertainty
+        signal.uncertainty = self._compute_uncertainty(
+            occurrence_count=signal.occurrence_count,
+            context_similarity=signal.context_similarity,
+        )
+
+        # 5. Detect risks
+        risks = self.detect_risks(strategy, context, agent)
+        for risk in risks:
+            signal.risk_signals.append(risk)
+
+        # 6. Detect opportunities
+        opportunities = self.detect_opportunities(strategy, context, agent)
+        for opp in opportunities:
+            signal.opportunity_signals.append(opp)
+
+        # 7. Recalculate scores (triggers recommendation update)
+        signal._recalculate_scores()
+
+        # 8. Generate reasoning
+        signal.reasoning = self._generate_reasoning(signal)
+
+        logger.debug(
+            f"Assessed strategy '{strategy[:30]}...': "
+            f"confidence={signal.confidence_score:.2f}, "
+            f"recommendation={signal.recommendation}"
+        )
+
+        return signal
+
+    def rank_strategies(
+        self,
+        strategies: List[str],
+        context: str,
+        agent: str,
+    ) -> List[Tuple[str, ConfidenceSignal]]:
+        """
+        Rank multiple strategies by confidence.
+
+        Args:
+            strategies: List of strategies to rank
+            context: Current context
+            agent: Agent name
+
+        Returns:
+            List of (strategy, signal) tuples, sorted by confidence (highest first)
+        """
+        results = []
+
+        for strategy in strategies:
+            signal = self.assess_strategy(
+                strategy=strategy,
+                context=context,
+                agent=agent,
+            )
+            results.append((strategy, signal))
+
+        # Sort by confidence score (highest first)
+        results.sort(key=lambda x: x[1].confidence_score, reverse=True)
+
+        return results
+
+    def detect_risks(
+        self,
+        strategy: str,
+        context: str,
+        agent: str,
+    ) -> List[RiskSignal]:
+        """
+        Detect risk signals for a strategy.
+
+        Checks for:
+        - Similar past failures (anti-patterns)
+        - Untested contexts
+        - High complexity indicators
+        - Missing prerequisites
+
+        Args:
+            strategy: Strategy to assess
+            context: Current context
+            agent: Agent name
+
+        Returns:
+            List of detected risk signals
+        """
+        risks = []
+
+        # Check for anti-patterns in ALMA
+        if self.alma:
+            try:
+                # Search for similar anti-patterns
+                memories = self.alma.retrieve(
+                    task=f"{strategy} {context}",
+                    agent=agent,
+                    top_k=10,
+                )
+
+                if memories and hasattr(memories, "anti_patterns"):
+                    for ap in memories.anti_patterns[:3]:
+                        # Check if this anti-pattern relates to our strategy
+                        if self._is_similar(strategy, ap.strategy):
+                            risks.append(
+                                RiskSignal(
+                                    signal_type="similar_to_failure",
+                                    description=f"Similar to known anti-pattern: {ap.reason[:100]}",
+                                    severity=0.7,
+                                    source=f"anti_pattern:{ap.id}",
+                                    related_memories=[ap.id],
+                                )
+                            )
+            except Exception as e:
+                logger.warning(f"Failed to check anti-patterns: {e}")
+
+        # Check for complexity indicators
+        complexity_keywords = [
+            "complex",
+            "multiple",
+            "all",
+            "every",
+            "entire",
+            "complete",
+        ]
+        complexity_score = sum(
+            1 for kw in complexity_keywords if kw in strategy.lower()
+        )
+        if complexity_score >= 2:
+            risks.append(
+                RiskSignal(
+                    signal_type="high_complexity",
+                    description="Strategy appears complex - consider breaking into smaller steps",
+                    severity=0.4,
+                    source="context_analysis",
+                )
+            )
+
+        # Check for risky patterns
+        risky_patterns = [
+            ("sleep", "Time-based waits can cause flaky behavior", 0.6),
+            ("force", "Force operations can have unintended side effects", 0.5),
+            ("delete all", "Bulk deletions are high-risk", 0.8),
+            ("production", "Production operations require extra caution", 0.7),
+        ]
+        for pattern, description, severity in risky_patterns:
+            if pattern in strategy.lower():
+                risks.append(
+                    RiskSignal(
+                        signal_type="risky_pattern",
+                        description=description,
+                        severity=severity,
+                        source="pattern_match",
+                    )
+                )
+
+        return risks
+
+    def detect_opportunities(
+        self,
+        strategy: str,
+        context: str,
+        agent: str,
+    ) -> List[OpportunitySignal]:
+        """
+        Detect opportunity signals for a strategy.
+
+        Checks for:
+        - Proven patterns with high success rate
+        - High similarity to past successes
+        - Recent successful uses
+        - Strong prerequisite matches
+
+        Args:
+            strategy: Strategy to assess
+            context: Current context
+            agent: Agent name
+
+        Returns:
+            List of detected opportunity signals
+        """
+        opportunities = []
+
+        # Check for matching heuristics in ALMA
+        if self.alma:
+            try:
+                memories = self.alma.retrieve(
+                    task=f"{strategy} {context}",
+                    agent=agent,
+                    top_k=10,
+                )
+
+                if memories and hasattr(memories, "heuristics"):
+                    for h in memories.heuristics[:3]:
+                        # Check success rate
+                        if h.occurrence_count >= self.min_occurrences_for_confidence:
+                            success_rate = (
+                                h.success_count / h.occurrence_count
+                                if h.occurrence_count > 0
+                                else 0
+                            )
+                            if success_rate >= 0.8:
+                                opportunities.append(
+                                    OpportunitySignal(
+                                        signal_type="proven_pattern",
+                                        description=f"Proven strategy with {success_rate:.0%} success rate over {h.occurrence_count} uses",
+                                        strength=min(0.9, success_rate),
+                                        source=f"heuristic:{h.id}",
+                                        related_memories=[h.id],
+                                    )
+                                )
+
+                # Check for recent successes in outcomes
+                if hasattr(memories, "outcomes"):
+                    recent_successes = [
+                        o
+                        for o in memories.outcomes
+                        if getattr(o, "outcome", "") == "success"
+                    ][:3]
+                    if recent_successes:
+                        opportunities.append(
+                            OpportunitySignal(
+                                signal_type="recent_success",
+                                description=f"Similar approach succeeded recently ({len(recent_successes)} recent successes)",
+                                strength=0.6,
+                                source="outcome_analysis",
+                                related_memories=[o.id for o in recent_successes],
+                            )
+                        )
+
+            except Exception as e:
+                logger.warning(f"Failed to check opportunities: {e}")
+
+        # Check for best practice patterns
+        best_practices = [
+            ("incremental", "Incremental approaches reduce risk", 0.5),
+            ("test first", "Test-first approaches catch issues early", 0.6),
+            ("validate", "Validation prevents downstream errors", 0.5),
+            ("small steps", "Small steps are easier to debug", 0.4),
+        ]
+        for pattern, description, strength in best_practices:
+            if pattern in strategy.lower():
+                opportunities.append(
+                    OpportunitySignal(
+                        signal_type="best_practice",
+                        description=description,
+                        strength=strength,
+                        source="pattern_match",
+                    )
+                )
+
+        return opportunities
+
+    def _compute_context_similarity(
+        self,
+        strategy: str,
+        context: str,
+        agent: str,
+    ) -> float:
+        """Compute similarity between current context and past successful contexts."""
+        if not self.alma:
+            return 0.5  # Default when no memory available
+
+        try:
+            # Retrieve relevant memories
+            memories = self.alma.retrieve(
+                task=context,
+                agent=agent,
+                top_k=5,
+            )
+
+            if not memories:
+                return 0.3  # Low similarity for novel contexts
+
+            # Check if any outcomes match our strategy
+            if hasattr(memories, "outcomes"):
+                matching_outcomes = [
+                    o
+                    for o in memories.outcomes
+                    if self._is_similar(strategy, getattr(o, "strategy_used", ""))
+                ]
+                if matching_outcomes:
+                    return 0.8  # High similarity
+
+            # Check heuristics
+            if hasattr(memories, "heuristics"):
+                matching_heuristics = [
+                    h
+                    for h in memories.heuristics
+                    if self._is_similar(strategy, getattr(h, "strategy", ""))
+                ]
+                if matching_heuristics:
+                    return 0.7
+
+            return 0.5  # Moderate similarity
+
+        except Exception as e:
+            logger.warning(f"Failed to compute context similarity: {e}")
+            return 0.5
+
+    def _predict_success(
+        self,
+        strategy: str,
+        context: str,
+        agent: str,
+        historical_rate: float,
+        context_similarity: float,
+    ) -> float:
+        """
+        Predict success probability for the strategy in current context.
+
+        Combines historical rate with context similarity adjustment.
+        """
+        # Base prediction from historical rate
+        if historical_rate > 0:
+            base = historical_rate
+        else:
+            base = 0.5  # Unknown strategies start at 50%
+
+        # Adjust for context similarity
+        # High similarity → trust historical rate more
+        # Low similarity → regress toward 50%
+        similarity_weight = context_similarity
+        predicted = (similarity_weight * base) + ((1 - similarity_weight) * 0.5)
+
+        return predicted
+
+    def _compute_uncertainty(
+        self,
+        occurrence_count: int,
+        context_similarity: float,
+    ) -> float:
+        """
+        Compute uncertainty in the prediction.
+
+        Higher uncertainty when:
+        - Few occurrences (limited data)
+        - Low context similarity (novel situation)
+        """
+        # Uncertainty decreases with more data
+        if occurrence_count >= 10:
+            data_uncertainty = 0.1
+        elif occurrence_count >= 5:
+            data_uncertainty = 0.3
+        elif occurrence_count >= 2:
+            data_uncertainty = 0.5
+        else:
+            data_uncertainty = 0.8
+
+        # Uncertainty increases with novel contexts
+        context_uncertainty = 1.0 - context_similarity
+
+        # Combined uncertainty
+        return min(1.0, (data_uncertainty + context_uncertainty) / 2)
+
+    def _is_similar(self, text1: str, text2: str) -> bool:
+        """Simple similarity check between two texts."""
+        if not text1 or not text2:
+            return False
+
+        # Normalize
+        t1 = text1.lower().strip()
+        t2 = text2.lower().strip()
+
+        # Exact match
+        if t1 == t2:
+            return True
+
+        # Substring match
+        if t1 in t2 or t2 in t1:
+            return True
+
+        # Word overlap
+        words1 = set(t1.split())
+        words2 = set(t2.split())
+        overlap = len(words1 & words2)
+        total = len(words1 | words2)
+
+        if total > 0 and overlap / total >= 0.5:
+            return True
+
+        return False
+
+    def _generate_reasoning(self, signal: ConfidenceSignal) -> str:
+        """Generate human-readable reasoning for the assessment."""
+        parts = []
+
+        # Historical data
+        if signal.occurrence_count >= self.min_occurrences_for_confidence:
+            parts.append(
+                f"Historical data shows {signal.historical_success_rate:.0%} success rate "
+                f"over {signal.occurrence_count} uses."
+            )
+        elif signal.occurrence_count > 0:
+            parts.append(
+                f"Limited historical data ({signal.occurrence_count} uses) - "
+                f"prediction has higher uncertainty."
+            )
+        else:
+            parts.append("No historical data for this strategy - treating as novel.")
+
+        # Context similarity
+        if signal.context_similarity >= 0.7:
+            parts.append(
+                "Current context is highly similar to past successful applications."
+            )
+        elif signal.context_similarity <= 0.3:
+            parts.append("Current context is quite different from past applications.")
+
+        # Key risks
+        high_risks = [r for r in signal.risk_signals if r.severity >= 0.6]
+        if high_risks:
+            parts.append(f"WARNING: {len(high_risks)} significant risk(s) detected.")
+
+        # Key opportunities
+        strong_opps = [o for o in signal.opportunity_signals if o.strength >= 0.6]
+        if strong_opps:
+            parts.append(
+                f"POSITIVE: {len(strong_opps)} strong opportunity signal(s) detected."
+            )

+        return " ".join(parts)