mcal-ai 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mcal/__init__.py ADDED
@@ -0,0 +1,165 @@
+ """
+ MCAL: Memory-Context Alignment Layer
+
+ A standalone memory architecture for AI agents that preserves:
+ - Intent graphs (goal hierarchies)
+ - Reasoning chains (decision rationale)
+ - Goal-aware retrieval (objective-based context)
+
+ Usage:
+     from mcal import MCAL
+
+     mcal = MCAL(
+         openai_api_key="..."  # or anthropic_api_key, or use bedrock
+     )
+
+     # Add conversation
+     result = await mcal.add(messages, user_id="user_123")
+
+     # Search with goal-awareness
+     context = await mcal.get_context("What's next?", user_id="user_123")
+ """
+
+ __version__ = "0.1.0"
+
+ # Main interface
+ from .mcal import MCAL, AddResult, SearchResult, TimingMetrics, TieredBedrockProvider
+
+ # Core models
+ from .core.models import (
+     DecisionTrail,
+     IntentGraph,
+     IntentNode,
+     IntentStatus,
+     IntentType,
+     Memory,
+     MemoryType,
+     RetrievalConfig,
+     RetrievalResult,
+     Session,
+     Turn,
+ )
+
+ # Core components (for advanced usage)
+ from .core.intent_tracker import IntentTracker
+ from .core.reasoning_store import ReasoningStore
+ from .core.goal_retriever import GoalRetriever, ContextAssembler
+
+ # Streaming API (Issue #10)
+ from .core.streaming import (
+     StreamEvent,
+     StreamEventType,
+     ExtractionPhase,
+     StreamProgress,
+     CacheHitInfo,
+     PhaseResult,
+ )
+
+ # Backends (MemoryEntry kept for compatibility)
+ from .backends import (
+     MemoryBackend,
+     MemoryEntry,
+ )
+
+ # Deprecated: Mem0Backend and StandaloneBackend (Issue #53)
+ # Use: from mcal.backends import Mem0Backend (requires pip install mcal[mem0])
+
+ __all__ = [
+     # Version
+     "__version__",
+     # Main interface
+     "MCAL",
+     "AddResult",
+     "SearchResult",
+     # Models
+     "DecisionTrail",
+     "IntentGraph",
+     "IntentNode",
+     "IntentStatus",
+     "IntentType",
+     "Memory",
+     "MemoryType",
+     "RetrievalConfig",
+     "RetrievalResult",
+     "Session",
+     "Turn",
+     # Core components
+     "IntentTracker",
+     "ReasoningStore",
+     "GoalRetriever",
+     "ContextAssembler",
+     # Backends (for advanced usage)
+     "MemoryBackend",
+     "MemoryEntry",
+     # Streaming (Issue #10)
+     "StreamEvent",
+     "StreamEventType",
+     "ExtractionPhase",
+     "StreamProgress",
+     "CacheHitInfo",
+     "PhaseResult",
+     # Integrations namespace
+     "integrations",
+     # Integration shortcuts (lazy loaded)
+     "LangGraphMemory",
+     "LangGraphStore",
+     "CrewAIMemory",
+     "AutoGenMemory",
+     "LangChainMemory",
+ ]
+
+
+ # Lazy loading for integration shortcuts
+ def __getattr__(name: str):
+     """Lazy load integration classes to avoid import errors."""
+     import importlib
+
+     if name == "integrations":
+         return importlib.import_module("mcal.integrations")
+
+     if name == "LangGraphMemory":
+         try:
+             mod = importlib.import_module("mcal.integrations.langgraph")
+             return mod.MCALMemory
+         except ImportError:
+             raise ImportError(
+                 "LangGraph integration requires: pip install mcal[langgraph]"
+             )
+
+     if name == "LangGraphStore":
+         try:
+             mod = importlib.import_module("mcal.integrations.langgraph")
+             return mod.MCALStore
+         except ImportError:
+             raise ImportError(
+                 "LangGraph integration requires: pip install mcal[langgraph]"
+             )
+
+     if name == "CrewAIMemory":
+         try:
+             mod = importlib.import_module("mcal.integrations.crewai")
+             return mod.MCALCrewMemory
+         except ImportError:
+             raise ImportError(
+                 "CrewAI integration requires: pip install mcal[crewai]"
+             )
+
+     if name == "AutoGenMemory":
+         try:
+             mod = importlib.import_module("mcal.integrations.autogen")
+             return mod.MCALMemoryAgent
+         except ImportError:
+             raise ImportError(
+                 "AutoGen integration requires: pip install mcal[autogen]"
+             )
+
+     if name == "LangChainMemory":
+         try:
+             mod = importlib.import_module("mcal.integrations.langchain")
+             return mod.MCALChatMemory
+         except ImportError:
+             raise ImportError(
+                 "LangChain integration requires: pip install mcal[langchain]"
+             )
+
+     raise AttributeError(f"module 'mcal' has no attribute '{name}'")
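The module-level __getattr__ above (PEP 562) resolves the integration shortcuts only on first attribute access, so importing mcal never pulls in LangGraph, CrewAI, AutoGen, or LangChain. A minimal sketch of the observable behaviour, assuming the package is installed without the mcal[langgraph] extra:

    import mcal

    print(mcal.__version__)  # "0.1.0", an eager attribute, no extras required

    try:
        memory_cls = mcal.LangGraphMemory  # first access triggers mcal.__getattr__
    except ImportError as exc:
        # "LangGraph integration requires: pip install mcal[langgraph]"
        print(exc)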
mcal/backends/__init__.py ADDED
@@ -0,0 +1,42 @@
+ """MCAL Memory Backends.
+
+ Issue #53: Mem0Backend and StandaloneBackend are deprecated.
+ MCAL is now standalone and doesn't require external backends.
+ These are kept for backward compatibility.
+ """
+
+ from .base import (
+     MemoryBackend,
+     MemoryEntry,
+ )
+
+ # Deprecated backends - import lazily to avoid requiring mem0ai
+ def __getattr__(name):
+     if name == "Mem0Backend":
+         import warnings
+         warnings.warn(
+             "Mem0Backend is deprecated. MCAL v1.0 is fully standalone. "
+             "Install with 'pip install mcal[mem0]' if needed.",
+             DeprecationWarning,
+             stacklevel=2
+         )
+         from .base import Mem0Backend
+         return Mem0Backend
+     elif name == "StandaloneBackend":
+         import warnings
+         warnings.warn(
+             "StandaloneBackend is deprecated. MCAL v1.0 is fully standalone.",
+             DeprecationWarning,
+             stacklevel=2
+         )
+         from .base import StandaloneBackend
+         return StandaloneBackend
+     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+ __all__ = [
+     "MemoryBackend",
+     "MemoryEntry",
+     # Deprecated (Issue #53)
+     "Mem0Backend",
+     "StandaloneBackend",
+ ]
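Because the deprecated names are resolved through this module-level __getattr__, the warning fires only when Mem0Backend or StandaloneBackend is actually imported; the supported MemoryBackend and MemoryEntry imports stay silent. A small sketch of that behaviour, illustrative only and assuming the package is installed:

    import warnings

    from mcal.backends import MemoryBackend, MemoryEntry  # no warning emitted

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        from mcal.backends import StandaloneBackend  # routed through __getattr__
        assert any(issubclass(w.category, DeprecationWarning) for w in caught)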
mcal/backends/base.py ADDED
@@ -0,0 +1,383 @@
+ """
+ Memory Backends
+
+ Abstracts the underlying memory storage to allow pluggable backends.
+ Default implementation uses Mem0 for fact storage.
+
+ MCAL adds reasoning/intent layers ON TOP of these backends.
+ """
+
+ from __future__ import annotations
+
+ from abc import ABC, abstractmethod
+ from typing import Any, Optional
+ from datetime import datetime, timezone
+
+ from pydantic import BaseModel, Field
+
+
+ def _utc_now() -> datetime:
+     """Return current UTC time (timezone-aware)."""
+     return datetime.now(timezone.utc)
+
+
+ # =============================================================================
+ # Backend Protocol / Abstract Base
+ # =============================================================================
+
+ class MemoryEntry(BaseModel):
+     """Standardized memory entry across backends."""
+     id: str
+     content: str
+     metadata: dict = Field(default_factory=dict)
+     score: Optional[float] = None  # Relevance score from search
+     created_at: Optional[datetime] = None
+     updated_at: Optional[datetime] = None
+
+
+ class MemoryBackend(ABC):
+     """
+     Abstract base for memory backends.
+
+     MCAL can work with different backends:
+     - Mem0Backend (default, recommended)
+     - StandaloneBackend (for testing or simple use cases)
+     - Custom backends (implement this interface)
+     """
+
+     @abstractmethod
+     def add(
+         self,
+         messages: list[dict],
+         user_id: str,
+         metadata: Optional[dict] = None
+     ) -> list[MemoryEntry]:
+         """
+         Add messages to memory, extracting facts.
+
+         Args:
+             messages: List of message dicts [{"role": "user", "content": "..."}]
+             user_id: User identifier
+             metadata: Optional metadata to attach
+
+         Returns:
+             List of extracted/stored memory entries
+         """
+         pass
+
+     @abstractmethod
+     def search(
+         self,
+         query: str,
+         user_id: str,
+         limit: int = 10
+     ) -> list[MemoryEntry]:
+         """
+         Search memories by semantic similarity.
+
+         Args:
+             query: Search query
+             user_id: User identifier
+             limit: Maximum results to return
+
+         Returns:
+             List of relevant memory entries with scores
+         """
+         pass
+
+     @abstractmethod
+     def get_all(self, user_id: str) -> list[MemoryEntry]:
+         """Get all memories for a user."""
+         pass
+
+     @abstractmethod
+     def delete(self, memory_id: str) -> bool:
+         """Delete a specific memory."""
+         pass
+
+     @abstractmethod
+     def delete_all(self, user_id: str) -> bool:
+         """Delete all memories for a user."""
+         pass
+
+
+ # =============================================================================
+ # Mem0 Backend Implementation
+ # =============================================================================
+
+ class Mem0Backend(MemoryBackend):
+     """
+     Mem0-based memory backend.
+
+     Leverages Mem0's production-ready infrastructure:
+     - Intelligent fact extraction
+     - Vector + Graph + Key-Value storage
+     - Deduplication and conflict resolution
+     - 90% token reduction
+
+     MCAL adds reasoning/intent layers on top.
+
+     Usage:
+         backend = Mem0Backend(config={
+             "llm": {"provider": "anthropic", "config": {"model": "claude-3-5-sonnet"}},
+             "embedder": {"provider": "openai", "config": {"model": "text-embedding-3-small"}}
+         })
+
+         # Or with API key for hosted Mem0
+         backend = Mem0Backend(api_key="your-mem0-api-key")
+     """
+
+     def __init__(
+         self,
+         config: Optional[dict] = None,
+         api_key: Optional[str] = None
+     ):
+         """
+         Initialize Mem0 backend.
+
+         Args:
+             config: Mem0 configuration dict (for self-hosted)
+             api_key: Mem0 API key (for hosted service)
+         """
+         try:
+             from mem0 import Memory, MemoryClient
+         except ImportError:
+             raise ImportError(
+                 "mem0ai is required for Mem0Backend. "
+                 "Install with: pip install mem0ai"
+             )
+
+         if api_key:
+             # Use hosted Mem0 service
+             self._client = MemoryClient(api_key=api_key)
+             self._is_hosted = True
+         else:
+             # Use self-hosted Mem0
+             self._client = Memory.from_config(config or self._default_config())
+             self._is_hosted = False
+
+     def _default_config(self) -> dict:
+         """Default Mem0 configuration using HuggingFace for local embeddings."""
+         return {
+             "llm": {
+                 "provider": "aws_bedrock",
+                 "config": {
+                     "model": "us.meta.llama3-3-70b-instruct-v1:0",
+                     "temperature": 0.1,
+                 }
+             },
+             "embedder": {
+                 "provider": "huggingface",
+                 "config": {
+                     "model": "sentence-transformers/all-MiniLM-L6-v2"
+                 }
+             },
+             "vector_store": {
+                 "provider": "faiss",
+                 "config": {
+                     "embedding_model_dims": 384,  # all-MiniLM-L6-v2 dimension
+                     "path": "/tmp/mcal_faiss"
+                 }
+             }
+         }
+
+     def add(
+         self,
+         messages: list[dict],
+         user_id: str,
+         metadata: Optional[dict] = None
+     ) -> list[MemoryEntry]:
+         """Add messages to Mem0, extracting facts."""
+         result = self._client.add(
+             messages=messages,
+             user_id=user_id,
+             metadata=metadata or {}
+         )
+
+         # Convert Mem0 response to standardized format
+         entries = []
+
+         # Handle different Mem0 response formats
+         if isinstance(result, dict):
+             results_list = result.get("results", result.get("memories", []))
+         elif isinstance(result, list):
+             results_list = result
+         else:
+             results_list = []
+
+         for item in results_list:
+             if isinstance(item, dict):
+                 entries.append(MemoryEntry(
+                     id=item.get("id", ""),
+                     content=item.get("memory", item.get("content", "")),
+                     metadata=item.get("metadata", {}),
+                     created_at=item.get("created_at"),
+                     updated_at=item.get("updated_at")
+                 ))
+
+         return entries
+
+     def search(
+         self,
+         query: str,
+         user_id: str,
+         limit: int = 10
+     ) -> list[MemoryEntry]:
+         """Search Mem0 memories by semantic similarity."""
+         result = self._client.search(
+             query=query,
+             user_id=user_id,
+             limit=limit
+         )
+
+         # Convert to standardized format
+         entries = []
+
+         if isinstance(result, dict):
+             results_list = result.get("results", result.get("memories", []))
+         elif isinstance(result, list):
+             results_list = result
+         else:
+             results_list = []
+
+         for item in results_list:
+             if isinstance(item, dict):
+                 entries.append(MemoryEntry(
+                     id=item.get("id", ""),
+                     content=item.get("memory", item.get("content", "")),
+                     metadata=item.get("metadata") or {},
+                     score=item.get("score", item.get("similarity", 0.0))
+                 ))
+
+         return entries
+
+     def get_all(self, user_id: str) -> list[MemoryEntry]:
+         """Get all memories for a user from Mem0."""
+         result = self._client.get_all(user_id=user_id)
+
+         entries = []
+         if isinstance(result, dict):
+             results_list = result.get("results", result.get("memories", []))
+         elif isinstance(result, list):
+             results_list = result
+         else:
+             results_list = []
+
+         for item in results_list:
+             if isinstance(item, dict):
+                 entries.append(MemoryEntry(
+                     id=item.get("id", ""),
+                     content=item.get("memory", item.get("content", "")),
+                     metadata=item.get("metadata", {})
+                 ))
+
+         return entries
+
+     def delete(self, memory_id: str) -> bool:
+         """Delete a specific memory from Mem0."""
+         try:
+             self._client.delete(memory_id=memory_id)
+             return True
+         except Exception:
+             return False
+
+     def delete_all(self, user_id: str) -> bool:
+         """Delete all memories for a user from Mem0."""
+         try:
+             self._client.delete_all(user_id=user_id)
+             return True
+         except Exception:
+             return False
+
+
+ # =============================================================================
+ # Standalone Backend (for testing / simple use)
+ # =============================================================================
+
+ class StandaloneBackend(MemoryBackend):
+     """
+     Simple in-memory backend for testing or lightweight use.
+
+     Does NOT have Mem0's intelligent extraction - just stores raw content.
+     Useful for:
+     - Unit testing
+     - Development without API keys
+     - Simple use cases
+
+     For production, use Mem0Backend.
+     """
+
+     def __init__(self):
+         self._memories: dict[str, dict[str, MemoryEntry]] = {}  # user_id -> {memory_id -> entry}
+         self._counter = 0
+
+     def add(
+         self,
+         messages: list[dict],
+         user_id: str,
+         metadata: Optional[dict] = None
+     ) -> list[MemoryEntry]:
+         """Store messages as simple memories (no intelligent extraction)."""
+         if user_id not in self._memories:
+             self._memories[user_id] = {}
+
+         entries = []
+         for msg in messages:
+             self._counter += 1
+             memory_id = f"mem_{self._counter}"
+
+             entry = MemoryEntry(
+                 id=memory_id,
+                 content=msg.get("content", ""),
+                 metadata={
+                     "role": msg.get("role", "unknown"),
+                     **(metadata or {})
+                 },
+                 created_at=_utc_now()
+             )
+
+             self._memories[user_id][memory_id] = entry
+             entries.append(entry)
+
+         return entries
+
+     def search(
+         self,
+         query: str,
+         user_id: str,
+         limit: int = 10
+     ) -> list[MemoryEntry]:
+         """Simple keyword-based search (no semantic similarity)."""
+         if user_id not in self._memories:
+             return []
+
+         query_lower = query.lower()
+         results = []
+
+         for entry in self._memories[user_id].values():
+             if query_lower in entry.content.lower():
+                 entry.score = 1.0  # Simple match
+                 results.append(entry)
+
+         return results[:limit]
+
+     def get_all(self, user_id: str) -> list[MemoryEntry]:
+         """Get all memories for a user."""
+         if user_id not in self._memories:
+             return []
+         return list(self._memories[user_id].values())
+
+     def delete(self, memory_id: str) -> bool:
+         """Delete a specific memory."""
+         for user_memories in self._memories.values():
+             if memory_id in user_memories:
+                 del user_memories[memory_id]
+                 return True
+         return False
+
+     def delete_all(self, user_id: str) -> bool:
+         """Delete all memories for a user."""
+         if user_id in self._memories:
+             self._memories[user_id] = {}
+             return True
+         return False
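The MemoryBackend ABC pins the backend contract to five methods (add, search, get_all, delete, delete_all) that exchange MemoryEntry objects or booleans, so a custom backend only needs to implement those, as the docstring's "Custom backends (implement this interface)" bullet suggests. Below is a minimal sketch of a hypothetical wrapper backend that delegates storage to another backend and logs each call; the LoggingBackend name and its logging behaviour are illustrative, not part of the package:

    from __future__ import annotations

    import logging
    from typing import Optional

    from mcal.backends import MemoryBackend, MemoryEntry

    logger = logging.getLogger("mcal.logging_backend")


    class LoggingBackend(MemoryBackend):
        """Hypothetical backend: logs every call, delegates to an inner backend."""

        def __init__(self, inner: MemoryBackend):
            self._inner = inner

        def add(self, messages: list[dict], user_id: str,
                metadata: Optional[dict] = None) -> list[MemoryEntry]:
            logger.info("add: %d messages for user %s", len(messages), user_id)
            return self._inner.add(messages, user_id, metadata)

        def search(self, query: str, user_id: str, limit: int = 10) -> list[MemoryEntry]:
            logger.info("search: %r for user %s", query, user_id)
            return self._inner.search(query, user_id, limit)

        def get_all(self, user_id: str) -> list[MemoryEntry]:
            return self._inner.get_all(user_id)

        def delete(self, memory_id: str) -> bool:
            return self._inner.delete(memory_id)

        def delete_all(self, user_id: str) -> bool:
            return self._inner.delete_all(user_id)

Wrapped this way, LoggingBackend(StandaloneBackend()) behaves like the in-memory backend while recording traffic, which can be convenient in tests.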
@@ -0,0 +1 @@
+ """MCAL Baseline implementations for comparison."""
mcal/core/__init__.py ADDED
@@ -0,0 +1,101 @@
+ """MCAL Core Components"""
+
+ from .models import (
+     DecisionTrail,
+     IntentGraph,
+     IntentNode,
+     IntentStatus,
+     IntentType,
+     Memory,
+     MemoryType,
+     RetrievalConfig,
+     RetrievalResult,
+     Session,
+     Turn,
+ )
+ from .intent_tracker import IntentTracker
+ from .reasoning_store import ReasoningStore
+ from .goal_retriever import GoalRetriever, ContextAssembler
+ from .storage import MCALStorage
+ from .extraction_cache import ExtractionCache, CacheStats, ExtractionState
+ from .unified_extractor import (
+     UnifiedExtractor,
+     UnifiedGraph,
+     GraphNode,
+     GraphEdge,
+     NodeType,
+     EdgeType,
+     DeduplicationStrategy,
+     DeduplicationStats,
+     graph_to_memories,
+     memories_to_context_string,
+ )
+ from .streaming import (
+     StreamEvent,
+     StreamEventType,
+     ExtractionPhase,
+     StreamProgress,
+     CacheHitInfo,
+     PhaseResult,
+ )
+ from .embeddings import (
+     EmbeddingService,
+     embed_graph_nodes,
+     embedding_to_bytes,
+     bytes_to_embedding,
+     embedding_to_base64,
+     base64_to_embedding,
+     get_embedding_model,
+     clear_embedding_model,
+ )
+ from .vector_index import VectorIndex, build_index_from_nodes
+
+ __all__ = [
+     "DecisionTrail",
+     "IntentGraph",
+     "IntentNode",
+     "IntentStatus",
+     "IntentType",
+     "Memory",
+     "MemoryType",
+     "RetrievalConfig",
+     "RetrievalResult",
+     "Session",
+     "Turn",
+     "IntentTracker",
+     "ReasoningStore",
+     "GoalRetriever",
+     "ContextAssembler",
+     "MCALStorage",
+     "ExtractionCache",
+     "CacheStats",
+     "ExtractionState",
+     # Unified Extractor (optimized)
+     "UnifiedExtractor",
+     "UnifiedGraph",
+     "GraphNode",
+     "GraphEdge",
+     "NodeType",
+     "EdgeType",
+     "graph_to_memories",
+     "memories_to_context_string",
+     # Streaming
+     "StreamEvent",
+     "StreamEventType",
+     "ExtractionPhase",
+     "StreamProgress",
+     "CacheHitInfo",
+     "PhaseResult",
+     # Embeddings (Issue #50)
+     "EmbeddingService",
+     "embed_graph_nodes",
+     "embedding_to_bytes",
+     "bytes_to_embedding",
+     "embedding_to_base64",
+     "base64_to_embedding",
+     "get_embedding_model",
+     "clear_embedding_model",
+     # Vector Search (Issue #51)
+     "VectorIndex",
+     "build_index_from_nodes",
+ ]