mem-llm 1.1.0__tar.gz → 1.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mem-llm might be problematic.
- {mem_llm-1.1.0 → mem_llm-1.3.0}/CHANGELOG.md +108 -0
- {mem_llm-1.1.0/mem_llm.egg-info → mem_llm-1.3.0}/PKG-INFO +84 -110
- {mem_llm-1.1.0 → mem_llm-1.3.0}/README.md +62 -107
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/__init__.py +26 -3
- mem_llm-1.3.0/mem_llm/base_llm_client.py +175 -0
- mem_llm-1.3.0/mem_llm/clients/__init__.py +25 -0
- mem_llm-1.3.0/mem_llm/clients/gemini_client.py +381 -0
- mem_llm-1.3.0/mem_llm/clients/lmstudio_client.py +280 -0
- mem_llm-1.3.0/mem_llm/clients/ollama_client.py +268 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/config_manager.py +1 -1
- mem_llm-1.3.0/mem_llm/conversation_summarizer.py +372 -0
- mem_llm-1.3.0/mem_llm/data_export_import.py +640 -0
- mem_llm-1.3.0/mem_llm/llm_client_factory.py +277 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/mem_agent.py +154 -43
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/memory_db.py +7 -1
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/thread_safe_db.py +7 -1
- {mem_llm-1.1.0 → mem_llm-1.3.0/mem_llm.egg-info}/PKG-INFO +84 -110
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm.egg-info/SOURCES.txt +11 -0
- mem_llm-1.3.0/mem_llm.egg-info/requires.txt +40 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/pyproject.toml +27 -4
- {mem_llm-1.1.0 → mem_llm-1.3.0}/requirements-optional.txt +3 -2
- {mem_llm-1.1.0 → mem_llm-1.3.0}/requirements.txt +1 -0
- mem_llm-1.3.0/tests/test_conversation_summarizer.py +347 -0
- mem_llm-1.3.0/tests/test_data_export_import.py +405 -0
- mem_llm-1.3.0/tests/test_llm_backends.py +352 -0
- mem_llm-1.1.0/mem_llm.egg-info/requires.txt +0 -17
- {mem_llm-1.1.0 → mem_llm-1.3.0}/MANIFEST.in +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/cli.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/config.yaml.example +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/config_from_docs.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/dynamic_prompt.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/knowledge_loader.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/llm_client.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/logger.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/memory_manager.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/memory_tools.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/prompt_security.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/retry_handler.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm.egg-info/dependency_links.txt +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm.egg-info/entry_points.txt +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm.egg-info/top_level.txt +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/requirements-dev.txt +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/setup.cfg +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_advanced_coverage.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_backward_compatibility.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_improvements.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_integration.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_llm_client.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_mem_agent.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_memory_manager.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_memory_tools.py +0 -0
- {mem_llm-1.1.0 → mem_llm-1.3.0}/tests/test_qwen3_model.py +0 -0
{mem_llm-1.1.0 → mem_llm-1.3.0}/CHANGELOG.md

@@ -5,6 +5,114 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.3.0] - 2025-10-31
+
+### 🎉 Major Features
+
+- 🔌 **Multi-Backend LLM Support**: Choose your preferred LLM backend
+  - **Ollama**: Local, privacy-first, 100+ models
+  - **LM Studio**: Fast local inference with easy GUI
+  - **Google Gemini**: Powerful cloud models (gemini-2.5-flash)
+  - Unified API across all backends
+  - Seamless switching between backends
+
+- 🏗️ **Factory Pattern Architecture**: Clean, extensible design
+  - `LLMClientFactory`: Central backend management
+  - `BaseLLMClient`: Abstract interface for all backends
+  - Easy to add new backends in the future
+
+- 🔍 **Auto-Detection**: Automatically find available LLM service
+  - `auto_detect_backend=True` parameter
+  - Checks Ollama → LM Studio → other local services
+  - No manual configuration needed
+
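The factory and auto-detection entries above name the new classes but not their call signatures; a minimal usage sketch follows, assuming a `create_client()` factory method and a `chat()` method on `MemAgent` (only `LLMClientFactory`, `MemAgent`, and the `auto_detect_backend=True` parameter are confirmed by this release).

```python
# Hypothetical sketch of the 1.3.0 multi-backend API described above.
# LLMClientFactory, MemAgent, and auto_detect_backend=True appear in this
# release; the method names and keyword arguments below are assumptions.
from mem_llm import LLMClientFactory, MemAgent

# Pick a backend explicitly through the factory (assumed method and arguments).
client = LLMClientFactory.create_client(backend="lmstudio", model="qwen2.5-7b-instruct")

# Or let the agent probe Ollama -> LM Studio -> other local services.
agent = MemAgent(auto_detect_backend=True)
reply = agent.chat("Remember that I prefer short answers.", user_id="emre")  # assumed API
print(reply)
```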
+### 🆕 New Components
+
+- `BaseLLMClient`: Abstract base class for all LLM backends
+- `LLMClientFactory`: Factory pattern for backend creation
+- `OllamaClient` (refactored): Now inherits from BaseLLMClient
+- `LMStudioClient`: OpenAI-compatible local inference
+- `GeminiClient`: Google Gemini API integration
+
+### 📚 New Examples
+
+- `11_lmstudio_example.py`: Using LM Studio backend
+- `12_gemini_example.py`: Using Google Gemini API
+- `13_multi_backend_comparison.py`: Compare backend performance
+- `14_auto_detect_backend.py`: Auto-detection feature
+
+### 📖 New Documentation
+
+- `MULTI_BACKEND_GUIDE.md`: Comprehensive guide for multi-backend setup
+
+### 🔄 Changed
+
+- **MemAgent**: Now supports multiple backends (backward compatible)
+- **Examples**: All simplified for clarity
+- **Package structure**: Better organized with `clients/` subdirectory
+
+### ⚡ Improved
+
+- **Backward Compatibility**: All v1.2.0 code still works
+- **Error Messages**: Backend-specific troubleshooting
+- **Connection Checks**: Improved availability detection
+
+### 🧪 Testing
+
+- 16+ new tests for multi-backend support
+- Factory pattern tests
+- Backend availability checks
+- MemAgent integration tests
+
+## [1.2.0] - 2025-10-21
+
+### Added
+
+- 📊 **Conversation Summarization**: Automatic conversation history compression
+  - `ConversationSummarizer`: Generates concise summaries from conversation histories
+  - `AutoSummarizer`: Threshold-based automatic summary updates
+  - Token compression: ~40-60% reduction in context size
+  - Key facts extraction: Automatic user profile insights
+  - Configurable thresholds and conversation limits
+
+- 📤 **Data Export/Import System**: Multi-format and multi-database support
+  - `DataExporter`: Export conversations to JSON, CSV, SQLite, PostgreSQL, MongoDB
+  - `DataImporter`: Import from JSON, CSV, SQLite, PostgreSQL, MongoDB
+  - Auto-create databases: PostgreSQL and MongoDB databases created automatically if missing
+  - Enterprise-ready: Support for analytics (PostgreSQL) and real-time dashboards (MongoDB)
+  - Optional dependencies: `pip install mem-llm[postgresql]`, `pip install mem-llm[mongodb]`, `pip install mem-llm[databases]`
+
+- 🗄️ **In-Memory Database Support**: Temporary database operations
+  - `db_path=":memory:"` parameter for MemAgent
+  - No file creation: Perfect for testing and temporary workflows
+  - Full SQL functionality without persistent storage
+
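A sketch of how the summarization classes listed under Added might be wired together; the class names come from this changelog, while the constructor arguments and method names are assumptions.

```python
# Hypothetical sketch of the v1.2.0 summarization feature described above.
# ConversationSummarizer and AutoSummarizer are real exports; the
# llm_client/threshold arguments and summarize() call are assumptions.
from mem_llm import ConversationSummarizer, AutoSummarizer, OllamaClient

history = [
    {"role": "user", "content": "My name is Emre and I work on local LLM tooling."},
    {"role": "assistant", "content": "Nice to meet you, Emre."},
]

summarizer = ConversationSummarizer(llm_client=OllamaClient())  # assumed parameter
summary = summarizer.summarize(history)                         # assumed method
print(summary)  # compressed summary, roughly 40-60% fewer tokens per the notes above

# Threshold-based variant: only re-summarize once the history grows large enough.
auto = AutoSummarizer(summarizer, threshold=20)                 # assumed signature
```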
+### Changed
+
+- 🔇 **Reduced Logging Verbosity**: Cleaner console output
+  - Default log level changed from INFO to WARNING
+  - Less noise in production environments
+  - Users can still enable detailed logs via config
+  - Examples suppress logs for cleaner demonstrations
+
+- 📦 **Enhanced Package Structure**: Better optional dependencies
+  - `pip install mem-llm[postgresql]` - PostgreSQL support only
+  - `pip install mem-llm[mongodb]` - MongoDB support only
+  - `pip install mem-llm[databases]` - Both PostgreSQL and MongoDB
+  - `pip install mem-llm[all]` - Everything included
+
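The extras listed just above install the drivers behind the `DataExporter`/`DataImporter` classes added in 1.2.0; a rough sketch follows, in which every method name is an assumption rather than a documented signature.

```python
# Hypothetical sketch of the v1.2.0 export/import system described above.
# DataExporter/DataImporter and the target formats are named in the changelog;
# the constructor argument and method names are assumptions.
from mem_llm import DataExporter, DataImporter

exporter = DataExporter(db_path="memories/memory.db")          # assumed source argument
exporter.export_json("backup/conversations.json")              # assumed method
exporter.export_postgresql("postgresql://localhost/mem_llm")   # assumed method; DB auto-created per the notes above

importer = DataImporter(db_path="memories/memory.db")          # assumed
importer.import_json("backup/conversations.json")              # assumed method
```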
+### Fixed
+
+- 🗄️ **Database Path Handling**: SQLite files now organized in memories/ folder
+  - All SQLite files (.db, .db-shm, .db-wal) now in memories/ directory
+  - Cleaner workspace: No database files cluttering project root
+  - Automatic directory creation: memories/ folder created if missing
+
+- 🔧 **MemAgent db_path Parameter**: Added missing parameter
+  - New `db_path` parameter in MemAgent.__init__()
+  - Enables custom database locations and in-memory databases
+  - Better control over database file placement
+
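The `db_path` parameter fixed above is concrete enough to show directly: `MemAgent(db_path=...)` and `get_memory_stats()` both appear in this diff, though any other constructor defaults are assumed.

```python
# db_path usage per the Fixed entries above.
from mem_llm import MemAgent

# Throwaway agent for tests: nothing is written to disk.
test_agent = MemAgent(db_path=":memory:")

# Explicit on-disk location; by default SQLite files now live under memories/.
agent = MemAgent(db_path="memories/customer_support.db")
print(agent.get_memory_stats())  # shown as a context line later in this diff
```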
 ## [1.1.0] - 2025-10-21
 
 ### Added
{mem_llm-1.1.0/mem_llm.egg-info → mem_llm-1.3.0}/PKG-INFO

@@ -1,13 +1,13 @@
 Metadata-Version: 2.2
 Name: mem-llm
-Version: 1.1.0
-Summary: Memory-enabled AI assistant with
+Version: 1.3.0
+Summary: Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini) - Local and cloud ready
 Author-email: "C. Emre Karataş" <karatasqemre@gmail.com>
 License: MIT
 Project-URL: Homepage, https://github.com/emredeveloper/Mem-LLM
 Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
 Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
-Keywords: llm,ai,memory,agent,chatbot,ollama,local
+Keywords: llm,ai,memory,agent,chatbot,ollama,lmstudio,gemini,multi-backend,local
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence

@@ -22,6 +22,7 @@ Description-Content-Type: text/markdown
 Requires-Dist: requests>=2.31.0
 Requires-Dist: pyyaml>=6.0.1
 Requires-Dist: click>=8.1.0
+Requires-Dist: google-generativeai>=0.3.0
 Provides-Extra: dev
 Requires-Dist: pytest>=7.4.0; extra == "dev"
 Requires-Dist: pytest-cov>=4.1.0; extra == "dev"

@@ -33,6 +34,24 @@ Requires-Dist: flask-cors>=4.0.0; extra == "web"
 Provides-Extra: api
 Requires-Dist: fastapi>=0.104.0; extra == "api"
 Requires-Dist: uvicorn>=0.24.0; extra == "api"
+Provides-Extra: postgresql
+Requires-Dist: psycopg2-binary>=2.9.9; extra == "postgresql"
+Provides-Extra: mongodb
+Requires-Dist: pymongo>=4.6.0; extra == "mongodb"
+Provides-Extra: databases
+Requires-Dist: psycopg2-binary>=2.9.9; extra == "databases"
+Requires-Dist: pymongo>=4.6.0; extra == "databases"
+Provides-Extra: all
+Requires-Dist: pytest>=7.4.0; extra == "all"
+Requires-Dist: pytest-cov>=4.1.0; extra == "all"
+Requires-Dist: black>=23.7.0; extra == "all"
+Requires-Dist: flake8>=6.1.0; extra == "all"
+Requires-Dist: flask>=3.0.0; extra == "all"
+Requires-Dist: flask-cors>=4.0.0; extra == "all"
+Requires-Dist: fastapi>=0.104.0; extra == "all"
+Requires-Dist: uvicorn>=0.24.0; extra == "all"
+Requires-Dist: psycopg2-binary>=2.9.9; extra == "all"
+Requires-Dist: pymongo>=4.6.0; extra == "all"
 
 # 🧠 Mem-LLM
 
@@ -44,16 +63,23 @@ Requires-Dist: uvicorn>=0.24.0; extra == "api"
 
 Mem-LLM is a powerful Python library that brings persistent memory capabilities to local Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and work completely offline with Ollama.
 
-##
+## 🔗 Links
 
--
--
--
--
-- 💾 **SQLite WAL Mode**: Write-Ahead Logging for better concurrency (15K+ msg/s)
-- ✅ **100% Backward Compatible**: All v1.0.x code works without changes
+- **PyPI**: https://pypi.org/project/mem-llm/
+- **GitHub**: https://github.com/emredeveloper/Mem-LLM
+- **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
+- **Documentation**: See examples/ directory
 
-
+## 🆕 What's New in v1.2.0
+
+- 📊 **Conversation Summarization**: Automatic conversation compression (~40-60% token reduction)
+- 📤 **Data Export/Import**: JSON, CSV, SQLite, PostgreSQL, MongoDB support
+- 🗄️ **Multi-Database**: Enterprise-ready PostgreSQL & MongoDB integration
+- 🗄️ **In-Memory DB**: Use `:memory:` for temporary operations
+- 🔇 **Cleaner Logs**: Default WARNING level for production-ready output
+- 🔧 **Bug Fixes**: Database path handling, organized SQLite files
+
+[See full changelog](CHANGELOG.md#120---2025-10-21)
 
 ## ✨ Key Features
 

@@ -70,15 +96,38 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
 - 🛡️ **Prompt Injection Protection** (v1.1.0+) - Advanced security against prompt attacks (opt-in)
 - ⚡ **High Performance** (v1.1.0+) - Thread-safe operations, 15K+ msg/s throughput
 - 🔄 **Retry Logic** (v1.1.0+) - Automatic exponential backoff for network errors
+- 📊 **Conversation Summarization** (v1.2.0+) - Automatic token compression (~40-60% reduction)
+- 📤 **Data Export/Import** (v1.2.0+) - Multi-format support (JSON, CSV, SQLite, PostgreSQL, MongoDB)
 
 ## 🚀 Quick Start
 
 ### Installation
 
+**Basic Installation:**
 ```bash
 pip install mem-llm
 ```
 
+**With Optional Dependencies:**
+```bash
+# PostgreSQL support
+pip install mem-llm[postgresql]
+
+# MongoDB support
+pip install mem-llm[mongodb]
+
+# All database support (PostgreSQL + MongoDB)
+pip install mem-llm[databases]
+
+# All optional features
+pip install mem-llm[all]
+```
+
+**Upgrade:**
+```bash
+pip install -U mem-llm
+```
+
 ### Prerequisites
 
 Install and start [Ollama](https://ollama.ai):

@@ -391,33 +440,8 @@ stats = agent.get_memory_stats()
 - **MemoryTools**: Search, export, statistics
 - **ConfigManager**: YAML configuration
 - **CLI**: Command-line interface
-
-
-
-Run the comprehensive test suite:
-
-```bash
-# Install dev dependencies
-pip install -r requirements-dev.txt
-
-# Run all tests (34+ automated tests)
-cd tests
-python run_all_tests.py
-
-# Run specific test
-python -m pytest test_mem_agent.py -v
-```
-
-### Test Coverage
-- ✅ Core imports and dependencies
-- ✅ CLI functionality
-- ✅ Ollama connection and models
-- ✅ JSON memory operations
-- ✅ SQL memory operations
-- ✅ MemAgent features
-- ✅ Configuration management
-- ✅ Multi-user scenarios
-- ✅ Hallucination detection
+- **ConversationSummarizer**: Token compression (v1.2.0+)
+- **DataExporter/DataImporter**: Multi-database support (v1.2.0+)
 
 ## 📝 Examples
 

@@ -430,53 +454,32 @@ The `examples/` directory contains ready-to-run demonstrations:
 5. **05_knowledge_base.py** - FAQ/support system
 6. **06_cli_demo.py** - Command-line interface examples
 7. **07_document_config.py** - Configuration from documents
+8. **08_conversation_summarization.py** - Token compression with auto-summary (v1.2.0+)
+9. **09_data_export_import.py** - Multi-format export/import demo (v1.2.0+)
+10. **10_database_connection_test.py** - Enterprise PostgreSQL/MongoDB migration (v1.2.0+)
 
-##
-
-### Setup Development Environment
-
-```bash
-git clone https://github.com/emredeveloper/Mem-LLM.git
-cd Mem-LLM
-pip install -e .
-pip install -r requirements-dev.txt
-```
-
-### Running Tests
-
-```bash
-pytest tests/ -v --cov=mem_llm
-```
-
-### Building Package
-
-```bash
-python -m build
-twine upload dist/*
-```
-
-## 📋 Requirements
-
-### Core Dependencies
-- Python 3.8+
-- requests>=2.31.0
-- pyyaml>=6.0.1
-- click>=8.1.0
-
-### Optional Dependencies
-- pytest>=7.4.0 (for testing)
-- flask>=3.0.0 (for web interface)
-- fastapi>=0.104.0 (for API server)
+## 📊 Project Status
 
-
+- **Version**: 1.2.0
+- **Status**: Production Ready
+- **Last Updated**: October 21, 2025
+- **Test Coverage**: 16/16 automated tests (100% success rate)
+- **Performance**: Thread-safe operations, <1ms search latency
+- **Databases**: SQLite, PostgreSQL, MongoDB, In-Memory
 
-
+## 📈 Roadmap
 
-
-
-
-
-
+- [x] ~~Thread-safe operations~~ (v1.1.0)
+- [x] ~~Prompt injection protection~~ (v1.1.0)
+- [x] ~~Structured logging~~ (v1.1.0)
+- [x] ~~Retry logic~~ (v1.1.0)
+- [x] ~~Conversation Summarization~~ (v1.2.0)
+- [x] ~~Multi-Database Export/Import~~ (v1.2.0)
+- [x] ~~In-Memory Database~~ (v1.2.0)
+- [ ] Web UI dashboard
+- [ ] REST API server
+- [ ] Vector database integration
+- [ ] Advanced analytics dashboard
 
 ## 📄 License
 

@@ -494,35 +497,6 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 - Inspired by the need for privacy-focused AI assistants
 - Thanks to all contributors and users
 
-## 📊 Project Status
-
-- **Version**: 1.1.0
-- **Status**: Production Ready
-- **Last Updated**: October 21, 2025
-- **Performance**: 15,346 msg/s write throughput, <1ms search latency
-- **Thread-Safe**: Supports 200+ concurrent operations
-- **Test Coverage**: 44+ automated tests (100% success rate)
-
-## 🔗 Links
-
-- **PyPI**: https://pypi.org/project/mem-llm/
-- **GitHub**: https://github.com/emredeveloper/Mem-LLM
-- **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
-- **Documentation**: See examples/ directory
-
-## 📈 Roadmap
-
-- [x] ~~Thread-safe operations~~ (v1.1.0)
-- [x] ~~Prompt injection protection~~ (v1.1.0)
-- [x] ~~Structured logging~~ (v1.1.0)
-- [x] ~~Retry logic~~ (v1.1.0)
-- [ ] Web UI dashboard
-- [ ] REST API server
-- [ ] Vector database integration
-- [ ] Multi-language support
-- [ ] Cloud backup options
-- [ ] Advanced analytics
-
 ---
 
 **⭐ If you find this project useful, please give it a star on GitHub!**
{mem_llm-1.1.0 → mem_llm-1.3.0}/README.md

@@ -8,16 +8,23 @@
 
 Mem-LLM is a powerful Python library that brings persistent memory capabilities to local Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and work completely offline with Ollama.
 
-##
+## 🔗 Links
 
--
--
--
--
-- 💾 **SQLite WAL Mode**: Write-Ahead Logging for better concurrency (15K+ msg/s)
-- ✅ **100% Backward Compatible**: All v1.0.x code works without changes
+- **PyPI**: https://pypi.org/project/mem-llm/
+- **GitHub**: https://github.com/emredeveloper/Mem-LLM
+- **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
+- **Documentation**: See examples/ directory
 
-
+## 🆕 What's New in v1.2.0
+
+- 📊 **Conversation Summarization**: Automatic conversation compression (~40-60% token reduction)
+- 📤 **Data Export/Import**: JSON, CSV, SQLite, PostgreSQL, MongoDB support
+- 🗄️ **Multi-Database**: Enterprise-ready PostgreSQL & MongoDB integration
+- 🗄️ **In-Memory DB**: Use `:memory:` for temporary operations
+- 🔇 **Cleaner Logs**: Default WARNING level for production-ready output
+- 🔧 **Bug Fixes**: Database path handling, organized SQLite files
+
+[See full changelog](CHANGELOG.md#120---2025-10-21)
 
 ## ✨ Key Features
 

@@ -34,15 +41,38 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
 - 🛡️ **Prompt Injection Protection** (v1.1.0+) - Advanced security against prompt attacks (opt-in)
 - ⚡ **High Performance** (v1.1.0+) - Thread-safe operations, 15K+ msg/s throughput
 - 🔄 **Retry Logic** (v1.1.0+) - Automatic exponential backoff for network errors
+- 📊 **Conversation Summarization** (v1.2.0+) - Automatic token compression (~40-60% reduction)
+- 📤 **Data Export/Import** (v1.2.0+) - Multi-format support (JSON, CSV, SQLite, PostgreSQL, MongoDB)
 
 ## 🚀 Quick Start
 
 ### Installation
 
+**Basic Installation:**
 ```bash
 pip install mem-llm
 ```
 
+**With Optional Dependencies:**
+```bash
+# PostgreSQL support
+pip install mem-llm[postgresql]
+
+# MongoDB support
+pip install mem-llm[mongodb]
+
+# All database support (PostgreSQL + MongoDB)
+pip install mem-llm[databases]
+
+# All optional features
+pip install mem-llm[all]
+```
+
+**Upgrade:**
+```bash
+pip install -U mem-llm
+```
+
 ### Prerequisites
 
 Install and start [Ollama](https://ollama.ai):

@@ -355,33 +385,8 @@ stats = agent.get_memory_stats()
 - **MemoryTools**: Search, export, statistics
 - **ConfigManager**: YAML configuration
 - **CLI**: Command-line interface
-
-
-
-Run the comprehensive test suite:
-
-```bash
-# Install dev dependencies
-pip install -r requirements-dev.txt
-
-# Run all tests (34+ automated tests)
-cd tests
-python run_all_tests.py
-
-# Run specific test
-python -m pytest test_mem_agent.py -v
-```
-
-### Test Coverage
-- ✅ Core imports and dependencies
-- ✅ CLI functionality
-- ✅ Ollama connection and models
-- ✅ JSON memory operations
-- ✅ SQL memory operations
-- ✅ MemAgent features
-- ✅ Configuration management
-- ✅ Multi-user scenarios
-- ✅ Hallucination detection
+- **ConversationSummarizer**: Token compression (v1.2.0+)
+- **DataExporter/DataImporter**: Multi-database support (v1.2.0+)
 
 ## 📝 Examples
 

@@ -394,53 +399,32 @@ The `examples/` directory contains ready-to-run demonstrations:
 5. **05_knowledge_base.py** - FAQ/support system
 6. **06_cli_demo.py** - Command-line interface examples
 7. **07_document_config.py** - Configuration from documents
+8. **08_conversation_summarization.py** - Token compression with auto-summary (v1.2.0+)
+9. **09_data_export_import.py** - Multi-format export/import demo (v1.2.0+)
+10. **10_database_connection_test.py** - Enterprise PostgreSQL/MongoDB migration (v1.2.0+)
 
-##
-
-### Setup Development Environment
-
-```bash
-git clone https://github.com/emredeveloper/Mem-LLM.git
-cd Mem-LLM
-pip install -e .
-pip install -r requirements-dev.txt
-```
-
-### Running Tests
-
-```bash
-pytest tests/ -v --cov=mem_llm
-```
-
-### Building Package
-
-```bash
-python -m build
-twine upload dist/*
-```
-
-## 📋 Requirements
-
-### Core Dependencies
-- Python 3.8+
-- requests>=2.31.0
-- pyyaml>=6.0.1
-- click>=8.1.0
-
-### Optional Dependencies
-- pytest>=7.4.0 (for testing)
-- flask>=3.0.0 (for web interface)
-- fastapi>=0.104.0 (for API server)
+## 📊 Project Status
 
-
+- **Version**: 1.2.0
+- **Status**: Production Ready
+- **Last Updated**: October 21, 2025
+- **Test Coverage**: 16/16 automated tests (100% success rate)
+- **Performance**: Thread-safe operations, <1ms search latency
+- **Databases**: SQLite, PostgreSQL, MongoDB, In-Memory
 
-
+## 📈 Roadmap
 
-
-
-
-
-
+- [x] ~~Thread-safe operations~~ (v1.1.0)
+- [x] ~~Prompt injection protection~~ (v1.1.0)
+- [x] ~~Structured logging~~ (v1.1.0)
+- [x] ~~Retry logic~~ (v1.1.0)
+- [x] ~~Conversation Summarization~~ (v1.2.0)
+- [x] ~~Multi-Database Export/Import~~ (v1.2.0)
+- [x] ~~In-Memory Database~~ (v1.2.0)
+- [ ] Web UI dashboard
+- [ ] REST API server
+- [ ] Vector database integration
+- [ ] Advanced analytics dashboard
 
 ## 📄 License
 

@@ -458,35 +442,6 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 - Inspired by the need for privacy-focused AI assistants
 - Thanks to all contributors and users
 
-## 📊 Project Status
-
-- **Version**: 1.1.0
-- **Status**: Production Ready
-- **Last Updated**: October 21, 2025
-- **Performance**: 15,346 msg/s write throughput, <1ms search latency
-- **Thread-Safe**: Supports 200+ concurrent operations
-- **Test Coverage**: 44+ automated tests (100% success rate)
-
-## 🔗 Links
-
-- **PyPI**: https://pypi.org/project/mem-llm/
-- **GitHub**: https://github.com/emredeveloper/Mem-LLM
-- **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
-- **Documentation**: See examples/ directory
-
-## 📈 Roadmap
-
-- [x] ~~Thread-safe operations~~ (v1.1.0)
-- [x] ~~Prompt injection protection~~ (v1.1.0)
-- [x] ~~Structured logging~~ (v1.1.0)
-- [x] ~~Retry logic~~ (v1.1.0)
-- [ ] Web UI dashboard
-- [ ] REST API server
-- [ ] Vector database integration
-- [ ] Multi-language support
-- [ ] Cloud backup options
-- [ ] Advanced analytics
-
 ---
 
 **⭐ If you find this project useful, please give it a star on GitHub!**
{mem_llm-1.1.0 → mem_llm-1.3.0}/mem_llm/__init__.py

@@ -5,7 +5,13 @@ AI library that remembers user interactions
 
 from .mem_agent import MemAgent
 from .memory_manager import MemoryManager
-from .llm_client import OllamaClient
+from .llm_client import OllamaClient  # Backward compatibility
+from .base_llm_client import BaseLLMClient
+from .llm_client_factory import LLMClientFactory
+
+# New multi-backend support (v1.3.0+)
+from .clients import OllamaClient as OllamaClientNew
+from .clients import LMStudioClient, GeminiClient
 
 # Tools (optional)
 try:

@@ -43,9 +49,26 @@
 except ImportError:
     __all_enhanced__ = []
 
-
+# Conversation Summarization (v1.2.0+)
+try:
+    from .conversation_summarizer import ConversationSummarizer, AutoSummarizer
+    __all_summarizer__ = ["ConversationSummarizer", "AutoSummarizer"]
+except ImportError:
+    __all_summarizer__ = []
+
+# Data Export/Import (v1.2.0+)
+try:
+    from .data_export_import import DataExporter, DataImporter
+    __all_export_import__ = ["DataExporter", "DataImporter"]
+except ImportError:
+    __all_export_import__ = []
+
+__version__ = "1.3.0"
 __author__ = "C. Emre Karataş"
 
+# Multi-backend LLM support (v1.3.0+)
+__all_llm_backends__ = ["BaseLLMClient", "LLMClientFactory", "OllamaClientNew", "LMStudioClient", "GeminiClient"]
+
 # CLI
 try:
     from .cli import cli

@@ -57,4 +80,4 @@ __all__ = [
     "MemAgent",
     "MemoryManager",
     "OllamaClient",
-] + __all_tools__ + __all_pro__ + __all_cli__ + __all_security__ + __all_enhanced__
+] + __all_llm_backends__ + __all_tools__ + __all_pro__ + __all_cli__ + __all_security__ + __all_enhanced__ + __all_summarizer__ + __all_export_import__