AbstractMemory 0.0.1.tar.gz → 0.2.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. abstractmemory-0.2.1/AbstractMemory.egg-info/PKG-INFO +460 -0
  2. abstractmemory-0.2.1/AbstractMemory.egg-info/SOURCES.txt +26 -0
  3. abstractmemory-0.2.1/AbstractMemory.egg-info/requires.txt +20 -0
  4. {abstractmemory-0.0.1 → abstractmemory-0.2.1}/LICENSE +4 -1
  5. abstractmemory-0.2.1/PKG-INFO +460 -0
  6. abstractmemory-0.2.1/README.md +417 -0
  7. abstractmemory-0.2.1/abstractmemory/__init__.py +754 -0
  8. abstractmemory-0.2.1/abstractmemory/cognitive/__init__.py +1 -0
  9. abstractmemory-0.2.1/abstractmemory/components/__init__.py +1 -0
  10. abstractmemory-0.2.1/abstractmemory/components/core.py +112 -0
  11. abstractmemory-0.2.1/abstractmemory/components/episodic.py +68 -0
  12. abstractmemory-0.2.1/abstractmemory/components/semantic.py +102 -0
  13. abstractmemory-0.2.1/abstractmemory/components/working.py +50 -0
  14. abstractmemory-0.2.1/abstractmemory/core/__init__.py +1 -0
  15. abstractmemory-0.2.1/abstractmemory/core/interfaces.py +95 -0
  16. abstractmemory-0.2.1/abstractmemory/core/temporal.py +100 -0
  17. abstractmemory-0.2.1/abstractmemory/embeddings/__init__.py +317 -0
  18. abstractmemory-0.2.1/abstractmemory/graph/__init__.py +1 -0
  19. abstractmemory-0.2.1/abstractmemory/graph/knowledge_graph.py +178 -0
  20. abstractmemory-0.2.1/abstractmemory/simple.py +151 -0
  21. abstractmemory-0.2.1/abstractmemory/storage/__init__.py +16 -0
  22. abstractmemory-0.2.1/abstractmemory/storage/dual_manager.py +269 -0
  23. abstractmemory-0.2.1/abstractmemory/storage/lancedb_storage.py +544 -0
  24. abstractmemory-0.2.1/abstractmemory/storage/markdown_storage.py +447 -0
  25. abstractmemory-0.2.1/pyproject.toml +54 -0
  26. abstractmemory-0.0.1/PKG-INFO +0 -94
  27. abstractmemory-0.0.1/README.md +0 -67
  28. abstractmemory-0.0.1/pyproject.toml +0 -42
  29. abstractmemory-0.0.1/src/AbstractMemory.egg-info/PKG-INFO +0 -94
  30. abstractmemory-0.0.1/src/AbstractMemory.egg-info/SOURCES.txt +0 -8
  31. abstractmemory-0.0.1/src/abstractmemory/__init__.py +0 -41
  32. {abstractmemory-0.0.1/src → abstractmemory-0.2.1}/AbstractMemory.egg-info/dependency_links.txt +0 -0
  33. {abstractmemory-0.0.1/src → abstractmemory-0.2.1}/AbstractMemory.egg-info/top_level.txt +0 -0
  34. {abstractmemory-0.0.1 → abstractmemory-0.2.1}/setup.cfg +0 -0
@@ -0,0 +1,460 @@
+ Metadata-Version: 2.4
+ Name: AbstractMemory
+ Version: 0.2.1
+ Summary: Production-ready memory system for LLM agents - NO MOCKS, real semantic search, clear LLM vs embedding provider separation
+ Author-email: AbstractMemory Team <lpalbou@gmail.com>
+ Maintainer-email: AbstractMemory Team <palbou@gmail.com>
+ License-Expression: MIT
+ Project-URL: Homepage, https://github.com/lpalbou/AbstractAgent
+ Project-URL: Documentation, https://github.com/lpalbou/AbstractAgent#readme
+ Project-URL: Repository, https://github.com/lpalbou/AbstractAgent
+ Project-URL: Bug Reports, https://github.com/lpalbou/AbstractAgent/issues
+ Keywords: llm,memory,semantic-search,embeddings,ai,agents,knowledge-graph,temporal,grounded-memory,vector-search
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Intended Audience :: Developers
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: networkx>=3.0
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Requires-Dist: mypy; extra == "dev"
+ Provides-Extra: llm
+ Requires-Dist: abstractcore>=2.1.0; extra == "llm"
+ Provides-Extra: embeddings
+ Requires-Dist: abstractcore>=2.1.0; extra == "embeddings"
+ Requires-Dist: lancedb>=0.6.0; extra == "embeddings"
+ Provides-Extra: storage
+ Requires-Dist: lancedb>=0.6.0; extra == "storage"
+ Provides-Extra: all
+ Requires-Dist: abstractcore>=2.1.0; extra == "all"
+ Requires-Dist: lancedb>=0.6.0; extra == "all"
+ Dynamic: license-file
+
+ # AbstractMemory
+
+ **Intelligent memory system for LLM agents with two-tier architecture**
+
+ AbstractMemory provides efficient, purpose-built memory solutions for different types of LLM agents - from simple task-specific tools to sophisticated autonomous agents with persistent, grounded memory.
+
+ ## 🎯 Project Goals
+
+ AbstractMemory is part of the **AbstractLLM ecosystem** refactoring, designed to power both simple and complex AI agents:
+
+ - **Simple agents** (ReAct, task tools) get lightweight, efficient memory
+ - **Autonomous agents** get sophisticated temporal memory with user tracking
+ - **No over-engineering** - memory complexity matches agent purpose
+
+ ## 🏗️ Architecture Overview
+
+ ```
+ ┌─────────────────────────────────────────────────────────────┐
+ │                    AbstractLLM Ecosystem                     │
+ ├─────────────────┬─────────────────┬─────────────────────────┤
+ │ AbstractCore    │ AbstractMemory  │ AbstractAgent           │
+ │                 │                 │                         │
+ │ • LLM Providers │ • Simple Memory │ • ReAct Agents          │
+ │ • Sessions      │ • Complex Memory│ • Autonomous Agents     │
+ │ • Tools         │ • Temporal KG   │ • Multi-user Agents     │
+ └─────────────────┴─────────────────┴─────────────────────────┘
+ ```
+
+ ## 🧠 Two-Tier Memory Strategy
+
+ ### Tier 1: Simple Memory (Task Agents)
+ Perfect for focused, single-purpose agents:
+
+ ```python
+ from abstractmemory import create_memory
+
+ # ReAct agent memory
+ scratchpad = create_memory("scratchpad", max_entries=50)
+ scratchpad.add_thought("User wants to learn Python")
+ scratchpad.add_action("search", {"query": "Python tutorials"})
+ scratchpad.add_observation("Found great tutorials")
+
+ # Simple chatbot memory
+ buffer = create_memory("buffer", max_messages=100)
+ buffer.add_message("user", "Hello!")
+ buffer.add_message("assistant", "Hi there!")
+ ```
+
+ ### Tier 2: Complex Memory (Autonomous Agents)
+ For sophisticated agents with persistence and learning:
+
+ ```python
+ # Autonomous agent with full memory capabilities
+ memory = create_memory("grounded", working_capacity=10, enable_kg=True)
+
+ # Multi-user context
+ memory.set_current_user("alice", relationship="owner")
+ memory.add_interaction("I love Python", "Python is excellent!")
+ memory.learn_about_user("Python developer")
+
+ # Get personalized context
+ context = memory.get_full_context("programming", user_id="alice")
+ ```
+
+ ## 🔧 Quick Start
+
+ ### Installation
+
+ ```bash
+ pip install abstractmemory
+
+ # For real LLM integration tests
+ pip install abstractmemory[llm]
+
+ # For LanceDB storage (optional)
+ pip install lancedb
+ ```
+
+ ### Basic Usage
+
+ ```python
+ from abstractmemory import create_memory
+
+ # 1. Choose memory type based on agent purpose
+ memory = create_memory("scratchpad")  # Simple task agent
+ memory = create_memory("buffer")      # Simple chatbot
+ memory = create_memory("grounded")    # Autonomous agent
+
+ # 2. Use memory in your agent
+ if agent_type == "react":
+     memory.add_thought("Planning the solution...")
+     memory.add_action("execute", {"command": "analyze"})
+     memory.add_observation("Analysis complete")
+
+ elif agent_type == "autonomous":
+     memory.set_current_user("user123")
+     memory.add_interaction(user_input, agent_response)
+     context = memory.get_full_context(query)
+ ```
+
+ ### 🗂️ Persistent Storage Options
+
+ AbstractMemory now supports sophisticated storage for observable, searchable AI memory:
+
+ #### Observable Markdown Storage
+ Perfect for development, debugging, and transparency:
+
+ ```python
+ # Human-readable, version-controllable AI memory
+ memory = create_memory(
+     "grounded",
+     storage_backend="markdown",
+     storage_path="./memory"
+ )
+
+ # Generates organized structure:
+ # memory/
+ # ├── verbatim/alice/2025/09/24/10-30-45_python_int_abc123.md
+ # ├── experiential/2025/09/24/10-31-02_learning_note_def456.md
+ # ├── links/2025/09/24/int_abc123_to_note_def456.json
+ # └── index.json
+ ```
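Because the markdown backend writes plain files, the store can be inspected (or version-controlled) with nothing but the standard library. A minimal editorial sketch, assuming the `./memory` layout shown above; the walker below is illustrative and not part of the AbstractMemory API:

```python
from pathlib import Path

# Walk the markdown store written by the "markdown" backend and preview each
# file. Uses only the stdlib, so it works regardless of how AbstractMemory
# names individual interaction files.
memory_root = Path("./memory")

for md_file in sorted(memory_root.rglob("*.md")):
    # e.g. verbatim/alice/2025/09/24/10-30-45_python_int_abc123.md
    print(md_file.relative_to(memory_root))
    print(md_file.read_text(encoding="utf-8")[:200])  # first 200 chars as a preview
    print("-" * 40)
```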
+
+ #### Powerful Vector Search
+ High-performance search with AbstractCore embeddings:
+
+ ```python
+ from abstractllm import create_llm
+
+ # Create provider with embedding support
+ provider = create_llm("openai", embedding_model="text-embedding-3-small")
+
+ # Vector search storage
+ memory = create_memory(
+     "grounded",
+     storage_backend="lancedb",
+     storage_uri="./memory.db",
+     embedding_provider=provider
+ )
+
+ # Semantic search across stored interactions
+ results = memory.search_stored_interactions("machine learning concepts")
+ ```
+
+ #### Dual Storage - Best of Both Worlds
+ Complete observability with powerful search:
+
+ ```python
+ # Dual storage: markdown (observable) + LanceDB (searchable)
+ memory = create_memory(
+     "grounded",
+     storage_backend="dual",
+     storage_path="./memory",
+     storage_uri="./memory.db",
+     embedding_provider=provider
+ )
+
+ # Every interaction stored in both formats
+ # - Markdown files for complete transparency
+ # - Vector database for semantic search
+ ```
+
+ ## 📚 Documentation
+
+ **👉 [START HERE: Complete Documentation Guide](docs/README.md)**
+
+ ### Core Guides
+ - **[🚀 Quick Start](docs/README.md#-start-here)** - Get running in 5 minutes
+ - **[🔍 Semantic Search](docs/semantic-search.md)** - Vector embeddings and similarity search
+ - **[🧠 Memory Types](docs/memory-types.md)** - ScratchpadMemory, BufferMemory, GroundedMemory
+ - **[📊 Performance Guide](docs/semantic-search.md#performance-characteristics)** - Embedding timing and optimization
+
+ ### Advanced Topics
+ - **[🏗️ Architecture](docs/architecture.md)** - System design and two-tier strategy
+ - **[💾 Storage Systems](docs/storage-systems.md)** - Markdown + LanceDB dual storage
+ - **[🎯 Usage Patterns](docs/usage-patterns.md)** - Real-world examples and best practices
+ - **[🔗 Integration Guide](docs/integration.md)** - AbstractLLM ecosystem integration
+ - **[📖 API Reference](docs/api-reference.md)** - Complete method documentation
+
+ ## 🔬 Key Features
+
+ ### ✅ Purpose-Built Memory Types
+ - **ScratchpadMemory**: ReAct thought-action-observation cycles for task agents
+ - **BufferMemory**: Simple conversation history with capacity limits
+ - **GroundedMemory**: Four-tier architecture with semantic search and temporal context
+
+ ### ✅ State-of-the-Art Research Integration
+ - **MemGPT/Letta Pattern**: Self-editing core memory
+ - **Temporal Grounding**: WHO (relational) + WHEN (temporal) context
+ - **Zep/Graphiti Architecture**: Bi-temporal knowledge graphs
+
+ ### ✅ Four-Tier Memory Architecture (Autonomous Agents)
+ ```
+ Core Memory ──→ Semantic Memory ──→ Working Memory ──→ Episodic Memory
+ (Identity)      (Validated Facts)   (Recent Context)   (Event Archive)
+ ```
+
+ ### ✅ Learning Capabilities
+ - **Failure/Success Tracking**: Learn from experience
+ - **User Personalization**: Multi-user context separation
+ - **Fact Validation**: Confidence-based knowledge consolidation
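A short sketch of how these capabilities combine, using only calls that already appear elsewhere in this README (`set_current_user`, `learn_about_user`, `add_interaction`, `get_full_context`); failure/success tracking has no documented call here, so it is only noted in a comment:

```python
from abstractmemory import create_memory

memory = create_memory("grounded", working_capacity=10, enable_kg=True)

# User personalization: separate context per user
memory.set_current_user("alice", relationship="owner")
memory.learn_about_user("Python developer")
memory.add_interaction("I prefer type-hinted code", "Noted - I'll use type hints.")

# Repeated, consistent observations are what confidence-based fact
# validation consolidates into longer-term memory over time.
# (Failure/success tracking happens internally as the agent acts.)
memory.add_interaction("Reminder: I prefer type-hinted code", "Understood.")

# Later, context assembled for this user reflects what was learned
context = memory.get_full_context("code style", user_id="alice")
print(context)
```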
+
+ ### ✅ Dual Storage Architecture
+ - **📄 Markdown Storage**: Human-readable, observable AI memory evolution
+ - **🔍 LanceDB Storage**: Vector search with SQL capabilities via AbstractCore
+ - **🔄 Dual Mode**: Best of both worlds - transparency + powerful search
+ - **🧠 AI Reflections**: Automatic experiential notes about interactions
+ - **🔗 Bidirectional Links**: Connect interactions to AI insights
+ - **📊 Search Capabilities**: Text-based and semantic similarity search
+
+ ### ✅ Semantic Search with AbstractCore
+ - **🎯 Real Embeddings**: Uses AbstractCore's EmbeddingManager with Google's EmbeddingGemma (768D)
+ - **⚡ Immediate Indexing**: Embeddings generated instantly during `add_interaction()` (~36ms)
+ - **🔍 Vector Similarity**: True semantic search finds contextually relevant content
+ - **🗄️ Dual Storage**: Observable markdown files + searchable LanceDB vectors
+ - **🎯 Production Ready**: Sub-second search, proven with 200+ real implementation tests
+
+ ## 🧪 Testing & Validation
+
+ AbstractMemory includes **200+ comprehensive tests** using ONLY real implementations:
+
+ ```bash
+ # Run all tests (NO MOCKS - only real implementations)
+ python -m pytest tests/ -v
+
+ # Run specific test suites
+ python -m pytest tests/simple/ -v       # Simple memory types
+ python -m pytest tests/components/ -v   # Memory components
+ python -m pytest tests/storage/ -v      # Storage system tests
+ python -m pytest tests/integration/ -v  # Full system integration
+
+ # Test with real LLM providers (requires AbstractCore)
+ python -m pytest tests/integration/test_llm_real_usage.py -v
+
+ # Test comprehensive dual storage with real embeddings
+ python -m pytest tests/storage/test_dual_storage_comprehensive.py -v
+ ```
+
+ **IMPORTANT**: All tests use real implementations:
+ - Real embedding providers (AbstractCore EmbeddingManager)
+ - Real LLM providers (Anthropic, OpenAI, Ollama via AbstractCore)
+ - Real memory components and storage systems
+ - NO MOCKS anywhere in the codebase
+
+ ## 🚀 Quick Start
+
+ ### Installation
+
+ ```bash
+ # Install with semantic search capabilities (recommended)
+ pip install abstractmemory[embeddings]
+
+ # Or install everything
+ pip install abstractmemory[all]
+
+ # Basic memory only (no semantic search)
+ pip install abstractmemory
+ ```
+
+ ### 📋 Upgrading from v0.1.0?
+
+ **Version 0.2.0 adds semantic search!** See [Migration Guide](CHANGELOG.md#-migration-guide) for:
+ - New AbstractCore dependency (`pip install abstractcore>=2.1.0`)
+ - LanceDB schema changes (recreate `.db` files)
+ - New `embedding_provider` parameter
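An editorial before/after sketch of that migration, using only parameter names that appear in the examples in this README; actual v0.1.0 call sites may differ:

```python
from abstractllm.embeddings import EmbeddingManager
from abstractmemory import create_memory

# v0.1.0-style setup (no semantic search):
# memory = create_memory("grounded", storage_backend="lancedb", storage_uri="./memory.db")

# v0.2.x: pass a dedicated embedding provider and point at a fresh database,
# since the LanceDB schema changed and old .db files need to be recreated.
memory = create_memory(
    "grounded",
    storage_backend="lancedb",
    storage_uri="./memory_v2.db",           # new file for the new schema
    embedding_provider=EmbeddingManager()   # new parameter in 0.2.x
)
```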
+
+ ### ⚠️ Critical: LLM vs Embedding Provider Separation
+
+ **Understanding the difference between LLM and Embedding providers:**
+
+ - 🔄 **LLM Providers** (text generation): Change freely between Anthropic, OpenAI, Ollama, etc.
+ - 🔒 **Embedding Providers** (semantic search): Must remain consistent within a storage space
+
+ **For semantic search consistency:**
+ - ✅ **Choose ONE embedding model and stick with it per storage space**
+ - ✅ **You can customize which embedding model to use (AbstractCore, OpenAI, Ollama, etc.)**
+ - ❌ **Don't change embedding models mid-project - it breaks vector search**
+ - 🚨 **AbstractMemory automatically warns when embedding model changes detected**
+
+ **Example of correct separation:**
+ ```python
+ # LLM for text generation (can change anytime)
+ llm = create_llm("anthropic")  # or "openai", "ollama", etc.
+
+ # Dedicated embedding provider (must stay consistent)
+ embedder = EmbeddingManager()  # AbstractCore embeddings
+
+ memory = create_memory("grounded", embedding_provider=embedder)  # NOT llm!
+ ```
+
+ ### Basic Usage
+
+ ```python
+ from abstractllm.embeddings import EmbeddingManager
+ from abstractmemory import create_memory
+
+ # 1. Create embedding manager for semantic search
+ em = EmbeddingManager()  # Uses EmbeddingGemma (768D vectors)
+
+ # 2. Create memory with dual storage
+ memory = create_memory(
+     "grounded",
+     storage_backend="dual",          # Markdown + LanceDB
+     storage_path="./memory_files",   # Observable files
+     storage_uri="./memory.db",       # Vector search
+     embedding_provider=em            # Real embeddings
+ )
+
+ # 3. Add interactions (embeddings generated immediately!)
+ memory.set_current_user("alice")
+ memory.add_interaction(
+     "I'm working on machine learning projects",
+     "Great! ML has amazing applications in many fields."
+ )
+ # ↳ Takes ~36ms: embedding generated and stored instantly
+
+ # 4. Semantic search finds contextually relevant content
+ results = memory.search_stored_interactions("artificial intelligence research")
+ # ↳ Finds ML interaction via semantic similarity (not keywords!)
+ print(f"Found {len(results)} relevant conversations")
+ ```
+
+ ### 📋 What Happens When You Add Interactions
+
+ ```python
+ memory.add_interaction("I love Python", "Great choice!")
+ # ↓ IMMEDIATE PROCESSING:
+ # 1. Text combined: "I love Python Great choice!"
+ # 2. EmbeddingManager.embed() called (36ms)
+ # 3. 768D vector generated with EmbeddingGemma
+ # 4. Saved to markdown file: ./memory_files/verbatim/alice/...
+ # 5. Stored in LanceDB: vector + text + metadata
+ # 6. Interaction immediately searchable via semantic similarity
+ ```
+
+ ## 🔗 AbstractLLM Ecosystem Integration
+
+ AbstractMemory seamlessly integrates with AbstractCore, maintaining clear separation between LLM and embedding providers:
+
+ ### Critical Architecture: LLM vs Embedding Separation
+ ```python
+ from abstractllm import create_llm
+ from abstractllm.embeddings import EmbeddingManager
+ from abstractmemory import create_memory
+
+ # SEPARATE PROVIDERS for different purposes:
+
+ # 1. LLM Provider - for TEXT GENERATION (can change freely)
+ llm_provider = create_llm("anthropic", model="claude-3-5-haiku-latest")
+
+ # 2. Embedding Provider - for SEMANTIC SEARCH (must stay consistent)
+ embedding_provider = EmbeddingManager()
+
+ # Create memory with DEDICATED embedding provider
+ memory = create_memory(
+     "grounded",
+     enable_kg=True,
+     storage_backend="dual",
+     storage_path="./memory",
+     storage_uri="./memory.db",
+     embedding_provider=embedding_provider  # DEDICATED for embeddings
+ )
+
+ # Use in agent reasoning with CLEAR separation
+ context = memory.get_full_context(query)
+ response = llm_provider.generate(prompt, system_prompt=context)  # LLM for text
+ memory.add_interaction(query, response.content)  # Embeddings handled internally
+
+ # Search uses embedding provider for semantic similarity
+ similar_memories = memory.search_stored_interactions("related concepts")
+ ```
+
+ ### Key Points:
+ - **LLM Provider**: Change freely between Anthropic ↔ OpenAI ↔ Ollama
+ - **Embedding Provider**: Must remain consistent within storage space
+ - **Never** pass LLM provider as embedding provider
+ - **Always** use dedicated embedding provider for semantic search
+
+ ### With AbstractAgent (Future)
+ ```python
+ from abstractagent import create_agent
+ from abstractmemory import create_memory
+
+ # Autonomous agent with sophisticated memory
+ memory = create_memory("grounded", working_capacity=20)
+ agent = create_agent("autonomous", memory=memory, provider=provider)
+
+ # Agent automatically uses memory for consistency and personalization
+ response = agent.execute(task, user_id="alice")
+ ```
+
+ ## 🏛️ Architecture Principles
+
+ 1. **No Over-Engineering**: Memory complexity matches agent requirements
+ 2. **Real Implementation Testing**: NO MOCKS anywhere - all tests use real implementations
+ 3. **SOTA Research Foundation**: Built on proven patterns (MemGPT, Zep, Graphiti)
+ 4. **Clean Abstractions**: Simple interfaces, powerful implementations
+ 5. **Performance Optimized**: Fast operations for simple agents, scalable for complex ones
+
+ ## 📈 Performance Characteristics
+
+ - **Simple Memory**: < 1ms operations, minimal overhead
+ - **Complex Memory**: < 100ms context generation, efficient consolidation
+ - **Scalability**: Handles thousands of memory items efficiently
+ - **Real LLM Integration**: Context + LLM calls complete in seconds
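The figures above are the project's own claims; a rough stdlib timing sketch like the one below can sanity-check the simple-memory number on your hardware (illustrative only, results vary by machine and configured backends):

```python
import time
from abstractmemory import create_memory

# Time the simple-memory write path documented earlier in this README.
scratchpad = create_memory("scratchpad", max_entries=50)

start = time.perf_counter()
for i in range(1_000):
    scratchpad.add_thought(f"step {i}")
elapsed_ms = (time.perf_counter() - start) * 1000

print(f"{elapsed_ms / 1_000:.3f} ms per add_thought() on average")
```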
+
+ ## 🤝 Contributing
+
+ AbstractMemory is part of the AbstractLLM ecosystem. See [CONTRIBUTING.md](CONTRIBUTING.md) for development guidelines.
+
+ ## 📄 License
+
+ [License details]
+
+ ---
+
+ **AbstractMemory: Smart memory for smart agents** 🧠✨
@@ -0,0 +1,26 @@
+ LICENSE
+ README.md
+ pyproject.toml
+ AbstractMemory.egg-info/PKG-INFO
+ AbstractMemory.egg-info/SOURCES.txt
+ AbstractMemory.egg-info/dependency_links.txt
+ AbstractMemory.egg-info/requires.txt
+ AbstractMemory.egg-info/top_level.txt
+ abstractmemory/__init__.py
+ abstractmemory/simple.py
+ abstractmemory/cognitive/__init__.py
+ abstractmemory/components/__init__.py
+ abstractmemory/components/core.py
+ abstractmemory/components/episodic.py
+ abstractmemory/components/semantic.py
+ abstractmemory/components/working.py
+ abstractmemory/core/__init__.py
+ abstractmemory/core/interfaces.py
+ abstractmemory/core/temporal.py
+ abstractmemory/embeddings/__init__.py
+ abstractmemory/graph/__init__.py
+ abstractmemory/graph/knowledge_graph.py
+ abstractmemory/storage/__init__.py
+ abstractmemory/storage/dual_manager.py
+ abstractmemory/storage/lancedb_storage.py
+ abstractmemory/storage/markdown_storage.py
@@ -0,0 +1,20 @@
+ networkx>=3.0
+
+ [all]
+ abstractcore>=2.1.0
+ lancedb>=0.6.0
+
+ [dev]
+ pytest
+ black
+ mypy
+
+ [embeddings]
+ abstractcore>=2.1.0
+ lancedb>=0.6.0
+
+ [llm]
+ abstractcore>=2.1.0
+
+ [storage]
+ lancedb>=0.6.0
@@ -1,6 +1,6 @@
  MIT License

- Copyright (c) 2025 AbstractMemory Team
+ Copyright (c) 2025 Laurent-Philippe Albou

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
@@ -19,3 +19,6 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
+
+
+