alma-memory 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
alma/__init__.py CHANGED
@@ -14,7 +14,7 @@ This makes any tool-using agent appear to "learn" by injecting relevant
14
14
  memory slices before each run and updating memory after.
15
15
  """
16
16
 
17
- __version__ = "0.2.0"
17
+ __version__ = "0.4.0"
18
18
 
19
19
  # Core
20
20
  from alma.core import ALMA
@@ -47,6 +47,51 @@ from alma.harness.domains import (
47
47
  create_harness,
48
48
  )
49
49
 
50
+ # Progress Tracking (Phase 10)
51
+ from alma.progress import (
52
+ WorkItem,
53
+ WorkItemStatus,
54
+ ProgressLog,
55
+ ProgressSummary,
56
+ ProgressTracker,
57
+ )
58
+
59
+ # Session Management (Phase 10)
60
+ from alma.session import (
61
+ SessionHandoff,
62
+ SessionContext,
63
+ SessionOutcome,
64
+ SessionManager,
65
+ )
66
+
67
+ # Domain Memory Factory (Phase 10)
68
+ from alma.domains import (
69
+ DomainSchema,
70
+ EntityType,
71
+ RelationshipType,
72
+ DomainMemoryFactory,
73
+ get_coding_schema,
74
+ get_research_schema,
75
+ get_sales_schema,
76
+ get_general_schema,
77
+ )
78
+
79
+ # Session Initializer (Phase 11)
80
+ from alma.initializer import (
81
+ CodebaseOrientation,
82
+ InitializationResult,
83
+ RulesOfEngagement,
84
+ SessionInitializer,
85
+ )
86
+
87
+ # Confidence Engine (Phase 12)
88
+ from alma.confidence import (
89
+ ConfidenceEngine,
90
+ ConfidenceSignal,
91
+ OpportunitySignal,
92
+ RiskSignal,
93
+ )
94
+
50
95
  __all__ = [
51
96
  # Core
52
97
  "ALMA",
@@ -72,4 +117,34 @@ __all__ = [
72
117
  "ContentDomain",
73
118
  "OperationsDomain",
74
119
  "create_harness",
120
+ # Progress Tracking
121
+ "WorkItem",
122
+ "WorkItemStatus",
123
+ "ProgressLog",
124
+ "ProgressSummary",
125
+ "ProgressTracker",
126
+ # Session Management
127
+ "SessionHandoff",
128
+ "SessionContext",
129
+ "SessionOutcome",
130
+ "SessionManager",
131
+ # Domain Memory Factory
132
+ "DomainSchema",
133
+ "EntityType",
134
+ "RelationshipType",
135
+ "DomainMemoryFactory",
136
+ "get_coding_schema",
137
+ "get_research_schema",
138
+ "get_sales_schema",
139
+ "get_general_schema",
140
+ # Session Initializer
141
+ "CodebaseOrientation",
142
+ "InitializationResult",
143
+ "RulesOfEngagement",
144
+ "SessionInitializer",
145
+ # Confidence Engine
146
+ "ConfidenceEngine",
147
+ "ConfidenceSignal",
148
+ "OpportunitySignal",
149
+ "RiskSignal",
75
150
  ]
@@ -0,0 +1,47 @@
1
"""
ALMA Confidence Module.

Provides forward-looking confidence signals for strategies: instead of only
recording that a strategy worked before, the engine estimates how likely it
is to work in the *current* context.

Inspired by Ilya Sutskever's insight: emotions are forward-looking value functions,
while reinforcement learning is backward-looking.

Example:
    from alma.confidence import ConfidenceEngine, ConfidenceSignal

    engine = ConfidenceEngine(alma)

    # Assess a single strategy
    signal = engine.assess_strategy(
        strategy="Use incremental validation for forms",
        context="Testing a registration form with 5 fields",
        agent="Helena",
    )
    print(f"Confidence: {signal.confidence_score:.0%}")
    print(f"Recommendation: {signal.recommendation}")

    # Rank several candidate strategies against one another
    rankings = engine.rank_strategies(
        strategies=["Strategy A", "Strategy B"],
        context="Current context",
        agent="Helena",
    )
"""

from alma.confidence.engine import ConfidenceEngine
from alma.confidence.types import (
    ConfidenceSignal,
    OpportunitySignal,
    Recommendation,
    RiskSignal,
)

__all__ = [
    "ConfidenceSignal",
    "ConfidenceEngine",
    "OpportunitySignal",
    "Recommendation",
    "RiskSignal",
]
@@ -0,0 +1,506 @@
1
+ """
2
+ Confidence Engine.
3
+
4
+ Computes forward-looking confidence for strategies.
5
+ Not just "this worked before" but "this will likely work now."
6
+ """
7
+
8
+ import logging
9
+ from typing import Any, List, Optional, Tuple
10
+
11
+ from alma.confidence.types import (
12
+ ConfidenceSignal,
13
+ OpportunitySignal,
14
+ RiskSignal,
15
+ )
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
class ConfidenceEngine:
    """
    Compute forward-looking confidence for strategies.

    The engine combines:
    1. Historical data (past outcomes, heuristics)
    2. Context analysis (similarity to past successes/failures)
    3. Risk detection (anti-patterns, untested contexts)
    4. Opportunity detection (proven patterns, recent successes)

    Usage:
        engine = ConfidenceEngine(alma)

        # Assess a single strategy
        signal = engine.assess_strategy(
            strategy="Use incremental validation for forms",
            context="Testing a 5-field registration form",
            agent="Helena",
        )
        print(f"Confidence: {signal.confidence_score:.0%}")
        print(f"Recommendation: {signal.recommendation}")

        # Rank multiple strategies
        rankings = engine.rank_strategies(
            strategies=["Strategy A", "Strategy B", "Strategy C"],
            context="Current task context",
            agent="Helena",
        )
        for strategy, signal in rankings:
            print(f"{strategy}: {signal.recommendation}")
    """

    def __init__(
        self,
        alma: Optional[Any] = None,
        similarity_threshold: float = 0.7,
        min_occurrences_for_confidence: int = 3,
    ):
        """
        Initialize the ConfidenceEngine.

        Args:
            alma: ALMA instance for memory access (None disables memory lookups)
            similarity_threshold: Minimum similarity for "high similarity" signals
            min_occurrences_for_confidence: Minimum uses before trusting historical rate
        """
        self.alma = alma
        self.similarity_threshold = similarity_threshold
        self.min_occurrences_for_confidence = min_occurrences_for_confidence

    def assess_strategy(
        self,
        strategy: str,
        context: str,
        agent: str,
        heuristic: Optional[Any] = None,
    ) -> ConfidenceSignal:
        """
        Assess confidence for a strategy in the current context.

        Args:
            strategy: The strategy to assess
            context: Current task/context description
            agent: Agent name
            heuristic: Optional existing heuristic for this strategy

        Returns:
            ConfidenceSignal with full assessment
        """
        signal = ConfidenceSignal.create(
            strategy=strategy,
            context=context,
            agent=agent,
            heuristic_id=heuristic.id if heuristic else None,
        )

        # 1. Load historical data from heuristic (tolerates missing attributes)
        if heuristic:
            signal.occurrence_count = getattr(heuristic, 'occurrence_count', 0)
            success_count = getattr(heuristic, 'success_count', 0)
            if signal.occurrence_count > 0:
                signal.historical_success_rate = success_count / signal.occurrence_count
            signal.metadata["heuristic_confidence"] = getattr(heuristic, 'confidence', 0.5)

        # 2. Analyze context similarity
        signal.context_similarity = self._compute_context_similarity(
            strategy=strategy,
            context=context,
            agent=agent,
        )

        # 3. Compute predicted success.
        # occurrence_count is passed so that a strategy with recorded history
        # and a 0% success rate is NOT treated like a never-tried strategy.
        signal.predicted_success = self._predict_success(
            strategy=strategy,
            context=context,
            agent=agent,
            historical_rate=signal.historical_success_rate,
            context_similarity=signal.context_similarity,
            occurrence_count=signal.occurrence_count,
        )

        # 4. Compute uncertainty
        signal.uncertainty = self._compute_uncertainty(
            occurrence_count=signal.occurrence_count,
            context_similarity=signal.context_similarity,
        )

        # 5. Detect risks
        signal.risk_signals.extend(self.detect_risks(strategy, context, agent))

        # 6. Detect opportunities
        signal.opportunity_signals.extend(self.detect_opportunities(strategy, context, agent))

        # 7. Recalculate scores (triggers recommendation update)
        signal._recalculate_scores()

        # 8. Generate reasoning
        signal.reasoning = self._generate_reasoning(signal)

        logger.debug(
            f"Assessed strategy '{strategy[:30]}...': "
            f"confidence={signal.confidence_score:.2f}, "
            f"recommendation={signal.recommendation}"
        )

        return signal

    def rank_strategies(
        self,
        strategies: List[str],
        context: str,
        agent: str,
    ) -> List[Tuple[str, ConfidenceSignal]]:
        """
        Rank multiple strategies by confidence.

        Args:
            strategies: List of strategies to rank
            context: Current context
            agent: Agent name

        Returns:
            List of (strategy, signal) tuples, sorted by confidence (highest first)
        """
        results = []

        for strategy in strategies:
            signal = self.assess_strategy(
                strategy=strategy,
                context=context,
                agent=agent,
            )
            results.append((strategy, signal))

        # Sort by confidence score (highest first)
        results.sort(key=lambda x: x[1].confidence_score, reverse=True)

        return results

    def detect_risks(
        self,
        strategy: str,
        context: str,
        agent: str,
    ) -> List[RiskSignal]:
        """
        Detect risk signals for a strategy.

        Checks for:
        - Similar past failures (anti-patterns)
        - High complexity indicators (keyword heuristic)
        - Known risky patterns (keyword heuristic)

        Args:
            strategy: Strategy to assess
            context: Current context
            agent: Agent name

        Returns:
            List of detected risk signals
        """
        risks = []

        # Check for anti-patterns in ALMA memory (best-effort: failures are
        # logged and the keyword heuristics below still run).
        if self.alma:
            try:
                # Search for similar anti-patterns
                memories = self.alma.retrieve(
                    task=f"{strategy} {context}",
                    agent=agent,
                    top_k=10,
                )

                if memories and hasattr(memories, 'anti_patterns'):
                    for ap in memories.anti_patterns[:3]:
                        # Check if this anti-pattern relates to our strategy
                        if self._is_similar(strategy, ap.strategy):
                            risks.append(RiskSignal(
                                signal_type="similar_to_failure",
                                description=f"Similar to known anti-pattern: {ap.reason[:100]}",
                                severity=0.7,
                                source=f"anti_pattern:{ap.id}",
                                related_memories=[ap.id],
                            ))
            except Exception as e:
                logger.warning(f"Failed to check anti-patterns: {e}")

        # Check for complexity indicators
        complexity_keywords = ["complex", "multiple", "all", "every", "entire", "complete"]
        complexity_score = sum(1 for kw in complexity_keywords if kw in strategy.lower())
        if complexity_score >= 2:
            risks.append(RiskSignal(
                signal_type="high_complexity",
                description="Strategy appears complex - consider breaking into smaller steps",
                severity=0.4,
                source="context_analysis",
            ))

        # Check for risky patterns
        risky_patterns = [
            ("sleep", "Time-based waits can cause flaky behavior", 0.6),
            ("force", "Force operations can have unintended side effects", 0.5),
            ("delete all", "Bulk deletions are high-risk", 0.8),
            ("production", "Production operations require extra caution", 0.7),
        ]
        for pattern, description, severity in risky_patterns:
            if pattern in strategy.lower():
                risks.append(RiskSignal(
                    signal_type="risky_pattern",
                    description=description,
                    severity=severity,
                    source="pattern_match",
                ))

        return risks

    def detect_opportunities(
        self,
        strategy: str,
        context: str,
        agent: str,
    ) -> List[OpportunitySignal]:
        """
        Detect opportunity signals for a strategy.

        Checks for:
        - Proven patterns with high success rate (>= 80% over enough uses)
        - Recent successful uses in retrieved outcomes
        - Best-practice keywords in the strategy text

        Args:
            strategy: Strategy to assess
            context: Current context
            agent: Agent name

        Returns:
            List of detected opportunity signals
        """
        opportunities = []

        # Check for matching heuristics in ALMA memory (best-effort, like
        # detect_risks: failures only log a warning).
        if self.alma:
            try:
                memories = self.alma.retrieve(
                    task=f"{strategy} {context}",
                    agent=agent,
                    top_k=10,
                )

                if memories and hasattr(memories, 'heuristics'):
                    for h in memories.heuristics[:3]:
                        # Only trust the rate once enough uses are recorded
                        if h.occurrence_count >= self.min_occurrences_for_confidence:
                            success_rate = h.success_count / h.occurrence_count if h.occurrence_count > 0 else 0
                            if success_rate >= 0.8:
                                opportunities.append(OpportunitySignal(
                                    signal_type="proven_pattern",
                                    description=f"Proven strategy with {success_rate:.0%} success rate over {h.occurrence_count} uses",
                                    strength=min(0.9, success_rate),
                                    source=f"heuristic:{h.id}",
                                    related_memories=[h.id],
                                ))

                # Check for recent successes in outcomes
                if hasattr(memories, 'outcomes'):
                    recent_successes = [
                        o for o in memories.outcomes
                        if getattr(o, 'outcome', '') == 'success'
                    ][:3]
                    if recent_successes:
                        opportunities.append(OpportunitySignal(
                            signal_type="recent_success",
                            description=f"Similar approach succeeded recently ({len(recent_successes)} recent successes)",
                            strength=0.6,
                            source="outcome_analysis",
                            related_memories=[o.id for o in recent_successes],
                        ))

            except Exception as e:
                logger.warning(f"Failed to check opportunities: {e}")

        # Check for best practice patterns
        best_practices = [
            ("incremental", "Incremental approaches reduce risk", 0.5),
            ("test first", "Test-first approaches catch issues early", 0.6),
            ("validate", "Validation prevents downstream errors", 0.5),
            ("small steps", "Small steps are easier to debug", 0.4),
        ]
        for pattern, description, strength in best_practices:
            if pattern in strategy.lower():
                opportunities.append(OpportunitySignal(
                    signal_type="best_practice",
                    description=description,
                    strength=strength,
                    source="pattern_match",
                ))

        return opportunities

    def _compute_context_similarity(
        self,
        strategy: str,
        context: str,
        agent: str,
    ) -> float:
        """
        Compute similarity between current context and past successful contexts.

        Returns a coarse bucketed score: 0.8 when a retrieved outcome used a
        similar strategy, 0.7 for a similar heuristic, 0.3 for novel contexts,
        0.5 as the moderate/unknown default (also on any retrieval failure).
        """
        if not self.alma:
            return 0.5  # Default when no memory available

        try:
            # Retrieve relevant memories
            memories = self.alma.retrieve(
                task=context,
                agent=agent,
                top_k=5,
            )

            if not memories:
                return 0.3  # Low similarity for novel contexts

            # Check if any outcomes match our strategy
            if hasattr(memories, 'outcomes'):
                matching_outcomes = [
                    o for o in memories.outcomes
                    if self._is_similar(strategy, getattr(o, 'strategy_used', ''))
                ]
                if matching_outcomes:
                    return 0.8  # High similarity

            # Check heuristics
            if hasattr(memories, 'heuristics'):
                matching_heuristics = [
                    h for h in memories.heuristics
                    if self._is_similar(strategy, getattr(h, 'strategy', ''))
                ]
                if matching_heuristics:
                    return 0.7

            return 0.5  # Moderate similarity

        except Exception as e:
            logger.warning(f"Failed to compute context similarity: {e}")
            return 0.5

    def _predict_success(
        self,
        strategy: str,
        context: str,
        agent: str,
        historical_rate: float,
        context_similarity: float,
        occurrence_count: int = 0,
    ) -> float:
        """
        Predict success probability for the strategy in current context.

        Combines historical rate with context similarity adjustment.

        Fix: previously `historical_rate > 0` was used as a proxy for "has
        history", which conflated a strategy that *always failed* (rate 0.0
        with recorded uses) with a never-tried strategy — both got the 0.5
        prior. `occurrence_count` (default 0 for backward compatibility)
        distinguishes the two, so an all-failure history now correctly drags
        the prediction down.
        """
        # Base prediction: trust history when any exists, otherwise 50% prior.
        if occurrence_count > 0 or historical_rate > 0:
            base = historical_rate
        else:
            base = 0.5  # Unknown strategies start at 50%

        # Adjust for context similarity:
        # High similarity → trust historical rate more
        # Low similarity → regress toward 50%
        similarity_weight = context_similarity
        predicted = (similarity_weight * base) + ((1 - similarity_weight) * 0.5)

        return predicted

    def _compute_uncertainty(
        self,
        occurrence_count: int,
        context_similarity: float,
    ) -> float:
        """
        Compute uncertainty in the prediction (0.0–1.0).

        Higher uncertainty when:
        - Few occurrences (limited data)
        - Low context similarity (novel situation)
        """
        # Uncertainty decreases with more data (stepped buckets)
        if occurrence_count >= 10:
            data_uncertainty = 0.1
        elif occurrence_count >= 5:
            data_uncertainty = 0.3
        elif occurrence_count >= 2:
            data_uncertainty = 0.5
        else:
            data_uncertainty = 0.8

        # Uncertainty increases with novel contexts
        context_uncertainty = 1.0 - context_similarity

        # Combined uncertainty: mean of both components, capped at 1.0
        return min(1.0, (data_uncertainty + context_uncertainty) / 2)

    def _is_similar(self, text1: str, text2: str) -> bool:
        """
        Simple similarity check between two texts.

        True on exact match, substring containment, or >= 50% word overlap
        (Jaccard on whitespace-split tokens). Case-insensitive.
        """
        if not text1 or not text2:
            return False

        # Normalize
        t1 = text1.lower().strip()
        t2 = text2.lower().strip()

        # Exact match
        if t1 == t2:
            return True

        # Substring match
        if t1 in t2 or t2 in t1:
            return True

        # Word overlap (Jaccard index)
        words1 = set(t1.split())
        words2 = set(t2.split())
        overlap = len(words1 & words2)
        total = len(words1 | words2)

        if total > 0 and overlap / total >= 0.5:
            return True

        return False

    def _generate_reasoning(self, signal: ConfidenceSignal) -> str:
        """Generate human-readable reasoning for the assessment."""
        parts = []

        # Historical data
        if signal.occurrence_count >= self.min_occurrences_for_confidence:
            parts.append(
                f"Historical data shows {signal.historical_success_rate:.0%} success rate "
                f"over {signal.occurrence_count} uses."
            )
        elif signal.occurrence_count > 0:
            parts.append(
                f"Limited historical data ({signal.occurrence_count} uses) - "
                f"prediction has higher uncertainty."
            )
        else:
            parts.append("No historical data for this strategy - treating as novel.")

        # Context similarity
        if signal.context_similarity >= 0.7:
            parts.append("Current context is highly similar to past successful applications.")
        elif signal.context_similarity <= 0.3:
            parts.append("Current context is quite different from past applications.")

        # Key risks
        high_risks = [r for r in signal.risk_signals if r.severity >= 0.6]
        if high_risks:
            parts.append(f"WARNING: {len(high_risks)} significant risk(s) detected.")

        # Key opportunities
        strong_opps = [o for o in signal.opportunity_signals if o.strength >= 0.6]
        if strong_opps:
            parts.append(f"POSITIVE: {len(strong_opps)} strong opportunity signal(s) detected.")

        return " ".join(parts)