alma-memory 0.4.0-py3-none-any.whl → 0.5.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (77)
  1. alma/__init__.py +88 -44
  2. alma/confidence/__init__.py +1 -1
  3. alma/confidence/engine.py +92 -58
  4. alma/confidence/types.py +34 -14
  5. alma/config/loader.py +3 -2
  6. alma/consolidation/__init__.py +23 -0
  7. alma/consolidation/engine.py +678 -0
  8. alma/consolidation/prompts.py +84 -0
  9. alma/core.py +15 -15
  10. alma/domains/__init__.py +6 -6
  11. alma/domains/factory.py +12 -9
  12. alma/domains/schemas.py +17 -3
  13. alma/domains/types.py +8 -4
  14. alma/events/__init__.py +75 -0
  15. alma/events/emitter.py +284 -0
  16. alma/events/storage_mixin.py +246 -0
  17. alma/events/types.py +126 -0
  18. alma/events/webhook.py +425 -0
  19. alma/exceptions.py +49 -0
  20. alma/extraction/__init__.py +31 -0
  21. alma/extraction/auto_learner.py +264 -0
  22. alma/extraction/extractor.py +420 -0
  23. alma/graph/__init__.py +81 -0
  24. alma/graph/backends/__init__.py +18 -0
  25. alma/graph/backends/memory.py +236 -0
  26. alma/graph/backends/neo4j.py +417 -0
  27. alma/graph/base.py +159 -0
  28. alma/graph/extraction.py +198 -0
  29. alma/graph/store.py +860 -0
  30. alma/harness/__init__.py +4 -4
  31. alma/harness/base.py +18 -9
  32. alma/harness/domains.py +27 -11
  33. alma/initializer/__init__.py +1 -1
  34. alma/initializer/initializer.py +51 -43
  35. alma/initializer/types.py +25 -17
  36. alma/integration/__init__.py +9 -9
  37. alma/integration/claude_agents.py +10 -10
  38. alma/integration/helena.py +32 -22
  39. alma/integration/victor.py +57 -33
  40. alma/learning/__init__.py +27 -27
  41. alma/learning/forgetting.py +198 -148
  42. alma/learning/heuristic_extractor.py +40 -24
  43. alma/learning/protocols.py +62 -14
  44. alma/learning/validation.py +7 -2
  45. alma/mcp/__init__.py +4 -4
  46. alma/mcp/__main__.py +2 -1
  47. alma/mcp/resources.py +17 -16
  48. alma/mcp/server.py +102 -44
  49. alma/mcp/tools.py +174 -37
  50. alma/progress/__init__.py +3 -3
  51. alma/progress/tracker.py +26 -20
  52. alma/progress/types.py +8 -12
  53. alma/py.typed +0 -0
  54. alma/retrieval/__init__.py +11 -11
  55. alma/retrieval/cache.py +20 -21
  56. alma/retrieval/embeddings.py +4 -4
  57. alma/retrieval/engine.py +114 -35
  58. alma/retrieval/scoring.py +73 -63
  59. alma/session/__init__.py +2 -2
  60. alma/session/manager.py +5 -5
  61. alma/session/types.py +5 -4
  62. alma/storage/__init__.py +41 -0
  63. alma/storage/azure_cosmos.py +101 -31
  64. alma/storage/base.py +157 -4
  65. alma/storage/chroma.py +1443 -0
  66. alma/storage/file_based.py +56 -20
  67. alma/storage/pinecone.py +1080 -0
  68. alma/storage/postgresql.py +1452 -0
  69. alma/storage/qdrant.py +1306 -0
  70. alma/storage/sqlite_local.py +376 -31
  71. alma/types.py +62 -14
  72. alma_memory-0.5.0.dist-info/METADATA +905 -0
  73. alma_memory-0.5.0.dist-info/RECORD +76 -0
  74. {alma_memory-0.4.0.dist-info → alma_memory-0.5.0.dist-info}/WHEEL +1 -1
  75. alma_memory-0.4.0.dist-info/METADATA +0 -488
  76. alma_memory-0.4.0.dist-info/RECORD +0 -52
  77. {alma_memory-0.4.0.dist-info → alma_memory-0.5.0.dist-info}/top_level.txt +0 -0
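The headline changes in 0.5.0 are new consolidation, event, extraction, and graph subsystems plus four new storage backends (Chroma, Pinecone, PostgreSQL, Qdrant), most of them re-exported from the package root. A minimal import sketch of the new public surface, using only names confirmed by the alma/__init__.py diff below (the grouping comments are editorial, and no constructor signatures are assumed):

    # All of these re-exports appear in the 0.5.0 alma/__init__.py diff.
    from alma import (
        ALMA,                 # core entry point (already present in 0.4.0)
        ConsolidationEngine,  # Phase 13: dedupe/merge similar memories
        ConsolidationResult,
        EventEmitter,         # Phase 19: memory event system
        MemoryEvent,
        MemoryEventType,
        WebhookConfig,
        WebhookManager,
        get_emitter,
        ALMAError,            # new exception module's apparent base class
    )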
alma/__init__.py CHANGED
@@ -17,79 +17,105 @@ memory slices before each run and updating memory after.
 __version__ = "0.4.0"
 
 # Core
+# Confidence Engine (Phase 12)
+from alma.confidence import (
+    ConfidenceEngine,
+    ConfidenceSignal,
+    OpportunitySignal,
+    RiskSignal,
+)
+
+# Consolidation Engine (Phase 13)
+from alma.consolidation import (
+    ConsolidationEngine,
+    ConsolidationResult,
+)
 from alma.core import ALMA
-from alma.types import (
-    Heuristic,
-    Outcome,
-    UserPreference,
-    DomainKnowledge,
-    AntiPattern,
-    MemorySlice,
-    MemoryScope,
+
+# Domain Memory Factory (Phase 10)
+from alma.domains import (
+    DomainMemoryFactory,
+    DomainSchema,
+    EntityType,
+    RelationshipType,
+    get_coding_schema,
+    get_general_schema,
+    get_research_schema,
+    get_sales_schema,
+)
+
+# Event System (Phase 19)
+from alma.events import (
+    EventEmitter,
+    MemoryEvent,
+    MemoryEventType,
+    WebhookConfig,
+    WebhookManager,
+    get_emitter,
+)
+
+# Exceptions
+from alma.exceptions import (
+    ALMAError,
+    ConfigurationError,
+    EmbeddingError,
+    ExtractionError,
+    RetrievalError,
+    ScopeViolationError,
+    StorageError,
 )
 
 # Harness Pattern
 from alma.harness.base import (
-    Setting,
-    Context,
     Agent,
-    MemorySchema,
+    Context,
     Harness,
+    MemorySchema,
+    RunResult,
+    Setting,
     Tool,
     ToolType,
-    RunResult,
 )
 from alma.harness.domains import (
     CodingDomain,
-    ResearchDomain,
     ContentDomain,
     OperationsDomain,
+    ResearchDomain,
     create_harness,
 )
 
+# Session Initializer (Phase 11)
+from alma.initializer import (
+    CodebaseOrientation,
+    InitializationResult,
+    RulesOfEngagement,
+    SessionInitializer,
+)
+
 # Progress Tracking (Phase 10)
 from alma.progress import (
-    WorkItem,
-    WorkItemStatus,
     ProgressLog,
     ProgressSummary,
     ProgressTracker,
+    WorkItem,
+    WorkItemStatus,
 )
 
 # Session Management (Phase 10)
 from alma.session import (
-    SessionHandoff,
     SessionContext,
-    SessionOutcome,
+    SessionHandoff,
     SessionManager,
+    SessionOutcome,
 )
-
-# Domain Memory Factory (Phase 10)
-from alma.domains import (
-    DomainSchema,
-    EntityType,
-    RelationshipType,
-    DomainMemoryFactory,
-    get_coding_schema,
-    get_research_schema,
-    get_sales_schema,
-    get_general_schema,
-)
-
-# Session Initializer (Phase 11)
-from alma.initializer import (
-    CodebaseOrientation,
-    InitializationResult,
-    RulesOfEngagement,
-    SessionInitializer,
-)
-
-# Confidence Engine (Phase 12)
-from alma.confidence import (
-    ConfidenceEngine,
-    ConfidenceSignal,
-    OpportunitySignal,
-    RiskSignal,
+from alma.types import (
+    AntiPattern,
+    DomainKnowledge,
+    Heuristic,
+    MemoryScope,
+    MemorySlice,
+    Outcome,
+    UserPreference,
 )
 
 __all__ = [
@@ -147,4 +173,22 @@ __all__ = [
     "ConfidenceSignal",
     "OpportunitySignal",
     "RiskSignal",
+    # Consolidation Engine
+    "ConsolidationEngine",
+    "ConsolidationResult",
+    # Event System
+    "MemoryEvent",
+    "MemoryEventType",
+    "EventEmitter",
+    "get_emitter",
+    "WebhookConfig",
+    "WebhookManager",
+    # Exceptions
+    "ALMAError",
+    "ConfigurationError",
+    "ScopeViolationError",
+    "StorageError",
+    "EmbeddingError",
+    "RetrievalError",
+    "ExtractionError",
 ]
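The __all__ hunk above also publishes a seven-member exception taxonomy. Since alma/exceptions.py itself is not excerpted in this diff, the sketch below assumes, without confirmation, that the specific errors subclass ALMAError; the retrieve() call is likewise hypothetical:

    from alma import ALMAError, RetrievalError, StorageError

    def retrieve_or_degrade(memory, task: str):
        # Hypothetical call site: ALMA's method names are not shown in this diff.
        try:
            return memory.retrieve(task=task)
        except RetrievalError:
            return None  # degrade gracefully: run without memory context
        except StorageError:
            raise  # backend failure: surface to the operator
        except ALMAError as exc:
            raise RuntimeError(f"unexpected ALMA failure: {exc}") from exc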
alma/confidence/__init__.py CHANGED
@@ -30,13 +30,13 @@ Usage:
     )
 """
 
+from alma.confidence.engine import ConfidenceEngine
 from alma.confidence.types import (
     ConfidenceSignal,
     OpportunitySignal,
     Recommendation,
     RiskSignal,
 )
-from alma.confidence.engine import ConfidenceEngine
 
 __all__ = [
     "ConfidenceSignal",
alma/confidence/engine.py CHANGED
@@ -95,11 +95,13 @@ class ConfidenceEngine:
 
         # 1. Load historical data from heuristic
         if heuristic:
-            signal.occurrence_count = getattr(heuristic, 'occurrence_count', 0)
-            success_count = getattr(heuristic, 'success_count', 0)
+            signal.occurrence_count = getattr(heuristic, "occurrence_count", 0)
+            success_count = getattr(heuristic, "success_count", 0)
             if signal.occurrence_count > 0:
                 signal.historical_success_rate = success_count / signal.occurrence_count
-            signal.metadata["heuristic_confidence"] = getattr(heuristic, 'confidence', 0.5)
+            signal.metadata["heuristic_confidence"] = getattr(
+                heuristic, "confidence", 0.5
+            )
 
         # 2. Analyze context similarity
         signal.context_similarity = self._compute_context_similarity(
@@ -214,30 +216,43 @@ class ConfidenceEngine:
                 top_k=10,
             )
 
-            if memories and hasattr(memories, 'anti_patterns'):
+            if memories and hasattr(memories, "anti_patterns"):
                 for ap in memories.anti_patterns[:3]:
                     # Check if this anti-pattern relates to our strategy
                     if self._is_similar(strategy, ap.strategy):
-                        risks.append(RiskSignal(
-                            signal_type="similar_to_failure",
-                            description=f"Similar to known anti-pattern: {ap.reason[:100]}",
-                            severity=0.7,
-                            source=f"anti_pattern:{ap.id}",
-                            related_memories=[ap.id],
-                        ))
+                        risks.append(
+                            RiskSignal(
+                                signal_type="similar_to_failure",
+                                description=f"Similar to known anti-pattern: {ap.reason[:100]}",
+                                severity=0.7,
+                                source=f"anti_pattern:{ap.id}",
+                                related_memories=[ap.id],
+                            )
+                        )
         except Exception as e:
             logger.warning(f"Failed to check anti-patterns: {e}")
 
         # Check for complexity indicators
-        complexity_keywords = ["complex", "multiple", "all", "every", "entire", "complete"]
-        complexity_score = sum(1 for kw in complexity_keywords if kw in strategy.lower())
+        complexity_keywords = [
+            "complex",
+            "multiple",
+            "all",
+            "every",
+            "entire",
+            "complete",
+        ]
+        complexity_score = sum(
+            1 for kw in complexity_keywords if kw in strategy.lower()
+        )
         if complexity_score >= 2:
-            risks.append(RiskSignal(
-                signal_type="high_complexity",
-                description="Strategy appears complex - consider breaking into smaller steps",
-                severity=0.4,
-                source="context_analysis",
-            ))
+            risks.append(
+                RiskSignal(
+                    signal_type="high_complexity",
+                    description="Strategy appears complex - consider breaking into smaller steps",
+                    severity=0.4,
+                    source="context_analysis",
+                )
+            )
 
         # Check for risky patterns
         risky_patterns = [
@@ -248,12 +263,14 @@ class ConfidenceEngine:
         ]
         for pattern, description, severity in risky_patterns:
             if pattern in strategy.lower():
-                risks.append(RiskSignal(
-                    signal_type="risky_pattern",
-                    description=description,
-                    severity=severity,
-                    source="pattern_match",
-                ))
+                risks.append(
+                    RiskSignal(
+                        signal_type="risky_pattern",
+                        description=description,
+                        severity=severity,
+                        source="pattern_match",
+                    )
+                )
 
         return risks
 
@@ -291,34 +308,43 @@ class ConfidenceEngine:
                 top_k=10,
             )
 
-            if memories and hasattr(memories, 'heuristics'):
+            if memories and hasattr(memories, "heuristics"):
                 for h in memories.heuristics[:3]:
                     # Check success rate
                     if h.occurrence_count >= self.min_occurrences_for_confidence:
-                        success_rate = h.success_count / h.occurrence_count if h.occurrence_count > 0 else 0
+                        success_rate = (
+                            h.success_count / h.occurrence_count
+                            if h.occurrence_count > 0
+                            else 0
+                        )
                         if success_rate >= 0.8:
-                            opportunities.append(OpportunitySignal(
-                                signal_type="proven_pattern",
-                                description=f"Proven strategy with {success_rate:.0%} success rate over {h.occurrence_count} uses",
-                                strength=min(0.9, success_rate),
-                                source=f"heuristic:{h.id}",
-                                related_memories=[h.id],
-                            ))
+                            opportunities.append(
+                                OpportunitySignal(
+                                    signal_type="proven_pattern",
+                                    description=f"Proven strategy with {success_rate:.0%} success rate over {h.occurrence_count} uses",
+                                    strength=min(0.9, success_rate),
+                                    source=f"heuristic:{h.id}",
+                                    related_memories=[h.id],
+                                )
+                            )
 
             # Check for recent successes in outcomes
-            if hasattr(memories, 'outcomes'):
+            if hasattr(memories, "outcomes"):
                 recent_successes = [
-                    o for o in memories.outcomes
-                    if getattr(o, 'outcome', '') == 'success'
+                    o
+                    for o in memories.outcomes
+                    if getattr(o, "outcome", "") == "success"
                 ][:3]
                 if recent_successes:
-                    opportunities.append(OpportunitySignal(
-                        signal_type="recent_success",
-                        description=f"Similar approach succeeded recently ({len(recent_successes)} recent successes)",
-                        strength=0.6,
-                        source="outcome_analysis",
-                        related_memories=[o.id for o in recent_successes],
-                    ))
+                    opportunities.append(
+                        OpportunitySignal(
+                            signal_type="recent_success",
+                            description=f"Similar approach succeeded recently ({len(recent_successes)} recent successes)",
+                            strength=0.6,
+                            source="outcome_analysis",
+                            related_memories=[o.id for o in recent_successes],
+                        )
+                    )
 
         except Exception as e:
             logger.warning(f"Failed to check opportunities: {e}")
@@ -332,12 +358,14 @@ class ConfidenceEngine:
         ]
         for pattern, description, strength in best_practices:
             if pattern in strategy.lower():
-                opportunities.append(OpportunitySignal(
-                    signal_type="best_practice",
-                    description=description,
-                    strength=strength,
-                    source="pattern_match",
-                ))
+                opportunities.append(
+                    OpportunitySignal(
+                        signal_type="best_practice",
+                        description=description,
+                        strength=strength,
+                        source="pattern_match",
+                    )
+                )
 
         return opportunities
 
@@ -363,19 +391,21 @@ class ConfidenceEngine:
             return 0.3  # Low similarity for novel contexts
 
         # Check if any outcomes match our strategy
-        if hasattr(memories, 'outcomes'):
+        if hasattr(memories, "outcomes"):
             matching_outcomes = [
-                o for o in memories.outcomes
-                if self._is_similar(strategy, getattr(o, 'strategy_used', ''))
+                o
+                for o in memories.outcomes
+                if self._is_similar(strategy, getattr(o, "strategy_used", ""))
             ]
             if matching_outcomes:
                 return 0.8  # High similarity
 
         # Check heuristics
-        if hasattr(memories, 'heuristics'):
+        if hasattr(memories, "heuristics"):
             matching_heuristics = [
-                h for h in memories.heuristics
-                if self._is_similar(strategy, getattr(h, 'strategy', ''))
+                h
+                for h in memories.heuristics
+                if self._is_similar(strategy, getattr(h, "strategy", ""))
             ]
             if matching_heuristics:
                 return 0.7
@@ -489,7 +519,9 @@ class ConfidenceEngine:
 
         # Context similarity
         if signal.context_similarity >= 0.7:
-            parts.append("Current context is highly similar to past successful applications.")
+            parts.append(
+                "Current context is highly similar to past successful applications."
+            )
         elif signal.context_similarity <= 0.3:
             parts.append("Current context is quite different from past applications.")
 
@@ -501,6 +533,8 @@ class ConfidenceEngine:
         # Key opportunities
         strong_opps = [o for o in signal.opportunity_signals if o.strength >= 0.6]
         if strong_opps:
-            parts.append(f"POSITIVE: {len(strong_opps)} strong opportunity signal(s) detected.")
+            parts.append(
+                f"POSITIVE: {len(strong_opps)} strong opportunity signal(s) detected."
+            )
 
         return " ".join(parts)
alma/confidence/types.py CHANGED
@@ -5,10 +5,10 @@ Forward-looking confidence signals for strategies.
 Inspired by Ilya Sutskever's insight: emotions are forward-looking value functions.
 """
 
+import uuid
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
 from typing import Any, Dict, List, Literal, Optional
-import uuid
 
 
 @dataclass
@@ -27,7 +27,9 @@ class RiskSignal:
     id: str = field(default_factory=lambda: str(uuid.uuid4()))
 
     # Type of risk
-    signal_type: str = ""  # "similar_to_failure", "untested_context", "high_complexity", etc.
+    signal_type: str = (
+        ""  # "similar_to_failure", "untested_context", "high_complexity", etc.
+    )
 
     # Human-readable description
     description: str = ""
@@ -139,7 +141,9 @@ class ConfidenceSignal:
 
     # Forward-looking predictions (computed for current context)
     predicted_success: float = 0.5  # Expected success in THIS context
-    uncertainty: float = 0.5  # How uncertain is the prediction (0=certain, 1=very uncertain)
+    uncertainty: float = (
+        0.5  # How uncertain is the prediction (0=certain, 1=very uncertain)
+    )
     context_similarity: float = 0.0  # How similar is current context to past successes
 
     # Risk signals
@@ -225,26 +229,28 @@ class ConfidenceSignal:
         # Aggregate opportunity
         if self.opportunity_signals:
             # Use max opportunity as the dominant signal
-            self.total_opportunity_score = max(o.strength for o in self.opportunity_signals)
+            self.total_opportunity_score = max(
+                o.strength for o in self.opportunity_signals
+            )
         else:
             self.total_opportunity_score = 0.0
 
         # Combined confidence score
         # Weighs historical success, predicted success, and risk/opportunity balance
         base_confidence = (
-            0.3 * self.historical_success_rate +
-            0.4 * self.predicted_success +
-            0.15 * self.context_similarity +
-            0.15 * (1.0 - self.uncertainty)
+            0.3 * self.historical_success_rate
+            + 0.4 * self.predicted_success
+            + 0.15 * self.context_similarity
+            + 0.15 * (1.0 - self.uncertainty)
         )
 
         # Adjust for risk/opportunity
         risk_adjustment = -0.2 * self.total_risk_score
         opportunity_adjustment = 0.2 * self.total_opportunity_score
 
-        self.confidence_score = max(0.0, min(1.0,
-            base_confidence + risk_adjustment + opportunity_adjustment
-        ))
+        self.confidence_score = max(
+            0.0, min(1.0, base_confidence + risk_adjustment + opportunity_adjustment)
+        )
 
         # Determine recommendation
         self._update_recommendation()
@@ -277,7 +283,9 @@ class ConfidenceSignal:
 
         # Metrics
         lines.append("### Metrics")
-        lines.append(f"- Historical success: {self.historical_success_rate:.0%} ({self.occurrence_count} uses)")
+        lines.append(
+            f"- Historical success: {self.historical_success_rate:.0%} ({self.occurrence_count} uses)"
+        )
         lines.append(f"- Predicted success: {self.predicted_success:.0%}")
         lines.append(f"- Context similarity: {self.context_similarity:.0%}")
         lines.append(f"- Uncertainty: {self.uncertainty:.0%}")
@@ -287,7 +295,13 @@ class ConfidenceSignal:
         if self.risk_signals:
             lines.append("### Risks")
             for risk in self.risk_signals:
-                severity_label = "HIGH" if risk.severity >= 0.7 else "MEDIUM" if risk.severity >= 0.4 else "LOW"
+                severity_label = (
+                    "HIGH"
+                    if risk.severity >= 0.7
+                    else "MEDIUM"
+                    if risk.severity >= 0.4
+                    else "LOW"
+                )
                 lines.append(f"- [{severity_label}] {risk.description}")
             lines.append("")
 
@@ -295,7 +309,13 @@ class ConfidenceSignal:
         if self.opportunity_signals:
             lines.append("### Opportunities")
             for opp in self.opportunity_signals:
-                strength_label = "STRONG" if opp.strength >= 0.7 else "MODERATE" if opp.strength >= 0.4 else "WEAK"
+                strength_label = (
+                    "STRONG"
+                    if opp.strength >= 0.7
+                    else "MODERATE"
+                    if opp.strength >= 0.4
+                    else "WEAK"
+                )
                 lines.append(f"- [{strength_label}] {opp.description}")
             lines.append("")
 
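The scoring hunk above exposes the complete confidence formula: a weighted base (0.3 historical success, 0.4 predicted success, 0.15 context similarity, 0.15 certainty) shifted by a -0.2 * risk and +0.2 * opportunity adjustment, then clamped to [0, 1]. A worked example with invented inputs:

    # Sample inputs are invented; weights and adjustments come from the diff above.
    historical_success_rate = 0.75  # e.g. 3 of 4 recorded uses succeeded
    predicted_success = 0.60
    context_similarity = 0.80
    uncertainty = 0.40
    total_risk_score = 0.40         # one medium-severity risk signal
    total_opportunity_score = 0.60

    base_confidence = (
        0.3 * historical_success_rate  # 0.225
        + 0.4 * predicted_success      # 0.240
        + 0.15 * context_similarity    # 0.120
        + 0.15 * (1.0 - uncertainty)   # 0.090
    )                                  # = 0.675

    confidence_score = max(
        0.0,
        min(1.0, base_confidence - 0.2 * total_risk_score + 0.2 * total_opportunity_score),
    )  # 0.675 - 0.08 + 0.12 = 0.715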
alma/config/loader.py CHANGED
@@ -5,10 +5,10 @@ Handles loading configuration from files and environment variables,
 with support for Azure Key Vault secret resolution.
 """
 
-import os
 import logging
+import os
 from pathlib import Path
-from typing import Dict, Any, Optional
+from typing import Any, Dict
 
 import yaml
 
@@ -78,6 +78,7 @@ class ConfigLoader:
 
         # Handle ${VAR} patterns
        import re
+
        pattern = r"\$\{([^}]+)\}"
 
        def replace(match):
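The loader hunk above shows the ${VAR} interpolation machinery: a regex plus a local replace callback whose body falls outside the hunk. A standalone sketch under the assumption that variables resolve from os.environ (the module docstring also mentions Azure Key Vault resolution, omitted here):

    import os
    import re

    pattern = r"\$\{([^}]+)\}"  # same pattern as in the ConfigLoader hunk

    def expand_env(value: str) -> str:
        def replace(match: re.Match) -> str:
            # Assumption: unresolved variables are left untouched; the real
            # behavior of ConfigLoader's replace() is not shown in this diff.
            return os.environ.get(match.group(1), match.group(0))
        return re.sub(pattern, replace, value)

    os.environ["PGHOST"] = "db.internal"
    assert expand_env("postgresql://${PGHOST}:5432/alma") == "postgresql://db.internal:5432/alma"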
alma/consolidation/__init__.py ADDED
@@ -0,0 +1,23 @@
+"""
+ALMA Consolidation Module.
+
+Provides memory consolidation capabilities for deduplicating and merging
+similar memories, inspired by Mem0's core innovation.
+"""
+
+from alma.consolidation.engine import ConsolidationEngine, ConsolidationResult
+from alma.consolidation.prompts import (
+    MERGE_ANTI_PATTERNS_PROMPT,
+    MERGE_DOMAIN_KNOWLEDGE_PROMPT,
+    MERGE_HEURISTICS_PROMPT,
+    MERGE_OUTCOMES_PROMPT,
+)
+
+__all__ = [
+    "ConsolidationEngine",
+    "ConsolidationResult",
+    "MERGE_HEURISTICS_PROMPT",
+    "MERGE_DOMAIN_KNOWLEDGE_PROMPT",
+    "MERGE_ANTI_PATTERNS_PROMPT",
+    "MERGE_OUTCOMES_PROMPT",
+]