mem-llm 1.2.0__tar.gz → 1.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mem-llm might be problematic.

Files changed (51)
  1. {mem_llm-1.2.0 → mem_llm-1.3.1}/CHANGELOG.md +66 -0
  2. {mem_llm-1.2.0/mem_llm.egg-info → mem_llm-1.3.1}/PKG-INFO +103 -36
  3. {mem_llm-1.2.0 → mem_llm-1.3.1}/README.md +99 -33
  4. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/__init__.py +12 -3
  5. mem_llm-1.3.1/mem_llm/base_llm_client.py +175 -0
  6. mem_llm-1.3.1/mem_llm/clients/__init__.py +25 -0
  7. mem_llm-1.3.1/mem_llm/clients/gemini_client.py +381 -0
  8. mem_llm-1.3.1/mem_llm/clients/lmstudio_client.py +280 -0
  9. mem_llm-1.3.1/mem_llm/clients/ollama_client.py +268 -0
  10. mem_llm-1.3.1/mem_llm/llm_client_factory.py +277 -0
  11. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/mem_agent.py +123 -37
  12. {mem_llm-1.2.0 → mem_llm-1.3.1/mem_llm.egg-info}/PKG-INFO +103 -36
  13. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm.egg-info/SOURCES.txt +7 -0
  14. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm.egg-info/requires.txt +1 -0
  15. {mem_llm-1.2.0 → mem_llm-1.3.1}/pyproject.toml +5 -4
  16. {mem_llm-1.2.0 → mem_llm-1.3.1}/requirements.txt +1 -0
  17. mem_llm-1.3.1/tests/test_llm_backends.py +352 -0
  18. {mem_llm-1.2.0 → mem_llm-1.3.1}/MANIFEST.in +0 -0
  19. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/cli.py +0 -0
  20. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/config.yaml.example +0 -0
  21. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/config_from_docs.py +0 -0
  22. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/config_manager.py +0 -0
  23. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/conversation_summarizer.py +0 -0
  24. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/data_export_import.py +0 -0
  25. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/dynamic_prompt.py +0 -0
  26. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/knowledge_loader.py +0 -0
  27. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/llm_client.py +0 -0
  28. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/logger.py +0 -0
  29. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/memory_db.py +0 -0
  30. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/memory_manager.py +0 -0
  31. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/memory_tools.py +0 -0
  32. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/prompt_security.py +0 -0
  33. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/retry_handler.py +0 -0
  34. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm/thread_safe_db.py +0 -0
  35. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm.egg-info/dependency_links.txt +0 -0
  36. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm.egg-info/entry_points.txt +0 -0
  37. {mem_llm-1.2.0 → mem_llm-1.3.1}/mem_llm.egg-info/top_level.txt +0 -0
  38. {mem_llm-1.2.0 → mem_llm-1.3.1}/requirements-dev.txt +0 -0
  39. {mem_llm-1.2.0 → mem_llm-1.3.1}/requirements-optional.txt +0 -0
  40. {mem_llm-1.2.0 → mem_llm-1.3.1}/setup.cfg +0 -0
  41. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_advanced_coverage.py +0 -0
  42. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_backward_compatibility.py +0 -0
  43. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_conversation_summarizer.py +0 -0
  44. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_data_export_import.py +0 -0
  45. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_improvements.py +0 -0
  46. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_integration.py +0 -0
  47. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_llm_client.py +0 -0
  48. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_mem_agent.py +0 -0
  49. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_memory_manager.py +0 -0
  50. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_memory_tools.py +0 -0
  51. {mem_llm-1.2.0 → mem_llm-1.3.1}/tests/test_qwen3_model.py +0 -0
@@ -5,6 +5,72 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+ ## [1.3.1] - 2025-10-31
+
+ ### 📝 Documentation
+
+ - ✅ **README Update**: Fixed PyPI package README to show v1.3.0 features correctly
+ - ✅ No code changes - all v1.3.0 functionality remains the same
+
+ ## [1.3.0] - 2025-10-31
+
+ ### 🎉 Major Features
+
+ - 🔌 **Multi-Backend LLM Support**: Choose your preferred LLM backend
+   - **Ollama**: Local, privacy-first, 100+ models
+   - **LM Studio**: Fast local inference with easy GUI
+   - **Google Gemini**: Powerful cloud models (gemini-2.5-flash)
+   - Unified API across all backends
+   - Seamless switching between backends
+
+ - 🏗️ **Factory Pattern Architecture**: Clean, extensible design
+   - `LLMClientFactory`: Central backend management
+   - `BaseLLMClient`: Abstract interface for all backends
+   - Easy to add new backends in the future
+
+ - 🔍 **Auto-Detection**: Automatically find an available LLM service
+   - `auto_detect_backend=True` parameter
+   - Checks Ollama → LM Studio → other local services
+   - No manual configuration needed
+
+ ### 🆕 New Components
+
+ - `BaseLLMClient`: Abstract base class for all LLM backends
+ - `LLMClientFactory`: Factory pattern for backend creation
+ - `OllamaClient` (refactored): Now inherits from BaseLLMClient
+ - `LMStudioClient`: OpenAI-compatible local inference
+ - `GeminiClient`: Google Gemini API integration
+
+ ### 📚 New Examples
+
+ - `11_lmstudio_example.py`: Using the LM Studio backend
+ - `12_gemini_example.py`: Using the Google Gemini API
+ - `13_multi_backend_comparison.py`: Compare backend performance
+ - `14_auto_detect_backend.py`: Auto-detection feature
+
+ ### 📖 New Documentation
+
+ - `MULTI_BACKEND_GUIDE.md`: Comprehensive guide for multi-backend setup
+
+ ### 🔄 Changed
+
+ - **MemAgent**: Now supports multiple backends (backward compatible)
+ - **Examples**: All simplified for clarity
+ - **Package structure**: Better organized with a `clients/` subdirectory
+
+ ### ⚡ Improved
+
+ - **Backward Compatibility**: All v1.2.0 code still works
+ - **Error Messages**: Backend-specific troubleshooting
+ - **Connection Checks**: Improved availability detection
+
+ ### 🧪 Testing
+
+ - 16+ new tests for multi-backend support
+ - Factory pattern tests
+ - Backend availability checks
+ - MemAgent integration tests
+
  ## [1.2.0] - 2025-10-21
 
  ### Added
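
The changelog above introduces `BaseLLMClient` and `LLMClientFactory`, but this diff summary does not show their contents. As a rough orientation, here is a minimal sketch of the usual shape of such a factory: an abstract client interface plus a name-to-class registry. This is an assumption for illustration, not the package's actual `base_llm_client.py` or `llm_client_factory.py`; the method names `chat`, `is_available`, `register`, and `create` are hypothetical.

```python
from abc import ABC, abstractmethod
from typing import Dict, Type


class BaseLLMClient(ABC):
    """Interface every backend client would implement (assumed shape)."""

    @abstractmethod
    def chat(self, messages, **kwargs) -> str:
        """Send a chat request and return the model's reply text."""

    @abstractmethod
    def is_available(self) -> bool:
        """Report whether the backend service is reachable."""


class LLMClientFactory:
    """Maps a backend name ('ollama', 'lmstudio', 'gemini') to a client class."""

    _registry: Dict[str, Type[BaseLLMClient]] = {}

    @classmethod
    def register(cls, name: str, client_cls: Type[BaseLLMClient]) -> None:
        cls._registry[name] = client_cls

    @classmethod
    def create(cls, backend: str, **kwargs) -> BaseLLMClient:
        # Unknown names fail fast instead of silently falling back
        if backend not in cls._registry:
            raise ValueError(f"Unknown backend: {backend!r}")
        return cls._registry[backend](**kwargs)
```

A registry like this is what makes "easy to add new backends in the future" cheap: a new client only needs to subclass the interface and register itself under a name.
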
@@ -1,13 +1,13 @@
  Metadata-Version: 2.2
  Name: mem-llm
- Version: 1.2.0
- Summary: Memory-enabled AI assistant with local LLM support - Now with data import/export and multi-database support
+ Version: 1.3.1
+ Summary: Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini) - Local and cloud ready
  Author-email: "C. Emre Karataş" <karatasqemre@gmail.com>
  License: MIT
  Project-URL: Homepage, https://github.com/emredeveloper/Mem-LLM
  Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
  Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
- Keywords: llm,ai,memory,agent,chatbot,ollama,local
+ Keywords: llm,ai,memory,agent,chatbot,ollama,lmstudio,gemini,multi-backend,local
  Classifier: Development Status :: 4 - Beta
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -22,6 +22,7 @@ Description-Content-Type: text/markdown
  Requires-Dist: requests>=2.31.0
  Requires-Dist: pyyaml>=6.0.1
  Requires-Dist: click>=8.1.0
+ Requires-Dist: google-generativeai>=0.3.0
  Provides-Extra: dev
  Requires-Dist: pytest>=7.4.0; extra == "dev"
  Requires-Dist: pytest-cov>=4.1.0; extra == "dev"
@@ -58,9 +59,9 @@ Requires-Dist: pymongo>=4.6.0; extra == "all"
  [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 
- **Memory-enabled AI assistant with local LLM support**
+ **Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini)**
 
- Mem-LLM is a powerful Python library that brings persistent memory capabilities to local Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and work completely offline with Ollama.
+ Mem-LLM is a powerful Python library that brings persistent memory capabilities to Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and choose between local (Ollama, LM Studio) or cloud (Gemini) backends.
 
  ## 🔗 Links
 
@@ -69,29 +70,31 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
  - **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
  - **Documentation**: See examples/ directory
 
- ## 🆕 What's New in v1.2.0
+ ## 🆕 What's New in v1.3.0
 
- - **Conversation Summarization**: Automatic conversation compression (~40-60% token reduction)
- - 📤 **Data Export/Import**: JSON, CSV, SQLite, PostgreSQL, MongoDB support
- - 🗄️ **Multi-Database**: Enterprise-ready PostgreSQL & MongoDB integration
- - **In-Memory DB**: Use `:memory:` for temporary operations
- - **Cleaner Logs**: Default WARNING level for production-ready output
- - **Bug Fixes**: Database path handling, organized SQLite files
+ - 🔌 **Multi-Backend Support**: Choose between Ollama (local), LM Studio (local), or Google Gemini (cloud)
+ - 🏗️ **Factory Pattern**: Clean, extensible architecture for easy backend switching
+ - 🔍 **Auto-Detection**: Automatically finds and uses available local LLM services
+ - **Unified API**: Same code works across all backends - just change one parameter
+ - 📚 **New Examples**: 4 additional examples showing multi-backend usage
+ - 🎯 **Backward Compatible**: All v1.2.0 code still works without changes
 
- [See full changelog](CHANGELOG.md#120---2025-10-21)
+ [See full changelog](CHANGELOG.md#130---2025-10-31)
 
  ## ✨ Key Features
 
+ - 🔌 **Multi-Backend Support** (v1.3.0+) - Choose Ollama, LM Studio, or Gemini with unified API
+ - 🔍 **Auto-Detection** (v1.3.0+) - Automatically find and use available LLM services
  - 🧠 **Persistent Memory** - Remembers conversations across sessions
- - 🤖 **Universal Ollama Support** - Works with ALL Ollama models (Qwen3, DeepSeek, Llama3, Granite, etc.)
+ - 🤖 **Universal Model Support** - Works with 100+ Ollama models, LM Studio models, and Gemini
  - 💾 **Dual Storage Modes** - JSON (simple) or SQLite (advanced) memory backends
  - 📚 **Knowledge Base** - Built-in FAQ/support system with categorized entries
  - 🎯 **Dynamic Prompts** - Context-aware system prompts that adapt to active features
  - 👥 **Multi-User Support** - Separate memory spaces for different users
  - 🔧 **Memory Tools** - Search, export, and manage stored memories
  - 🎨 **Flexible Configuration** - Personal or business usage modes
- - 📊 **Production Ready** - Comprehensive test suite with 34+ automated tests
- - 🔒 **100% Local & Private** - No cloud dependencies, your data stays yours
+ - 📊 **Production Ready** - Comprehensive test suite with 50+ automated tests
+ - 🔒 **Privacy Options** - 100% local (Ollama/LM Studio) or cloud (Gemini)
  - 🛡️ **Prompt Injection Protection** (v1.1.0+) - Advanced security against prompt attacks (opt-in)
  - ⚡ **High Performance** (v1.1.0+) - Thread-safe operations, 15K+ msg/s throughput
  - 🔄 **Retry Logic** (v1.1.0+) - Automatic exponential backoff for network errors
@@ -129,8 +132,9 @@ pip install -U mem-llm
 
  ### Prerequisites
 
- Install and start [Ollama](https://ollama.ai):
+ **Choose one of the following LLM backends:**
 
+ #### Option 1: Ollama (Local, Privacy-First)
  ```bash
  # Install Ollama (visit https://ollama.ai)
  # Then pull a model
@@ -140,15 +144,38 @@ ollama pull granite4:tiny-h
  ollama serve
  ```
 
+ #### Option 2: LM Studio (Local, GUI-Based)
+ ```bash
+ # 1. Download and install LM Studio: https://lmstudio.ai
+ # 2. Download a model from the UI
+ # 3. Start the local server (default port: 1234)
+ ```
+
+ #### Option 3: Google Gemini (Cloud, Powerful)
+ ```bash
+ # Get an API key from: https://makersuite.google.com/app/apikey
+ # Set environment variable
+ export GEMINI_API_KEY="your-api-key-here"
+ ```
+
  ### Basic Usage
 
  ```python
  from mem_llm import MemAgent
 
- # Create an agent
+ # Option 1: Use Ollama (default)
  agent = MemAgent(model="granite4:tiny-h")
 
- # Set user and chat
+ # Option 2: Use LM Studio
+ agent = MemAgent(backend='lmstudio', model='local-model')
+
+ # Option 3: Use Gemini
+ agent = MemAgent(backend='gemini', model='gemini-2.5-flash', api_key='your-key')
+
+ # Option 4: Auto-detect available backend
+ agent = MemAgent(auto_detect_backend=True)
+
+ # Set user and chat (same for all backends!)
  agent.set_user("alice")
  response = agent.chat("My name is Alice and I love Python!")
  print(response)
@@ -158,10 +185,34 @@ response = agent.chat("What's my name and what do I love?")
  print(response) # Agent remembers: "Your name is Alice and you love Python!"
  ```
 
- That's it! Just 5 lines of code to get started.
+ That's it! Just 5 lines of code to get started with any backend.
 
  ## 📖 Usage Examples
 
+ ### Multi-Backend Examples (v1.3.0+)
+
+ ```python
+ from mem_llm import MemAgent
+
+ # LM Studio - Fast local inference
+ agent = MemAgent(
+     backend='lmstudio',
+     model='local-model',
+     base_url='http://localhost:1234'
+ )
+
+ # Google Gemini - Cloud power
+ agent = MemAgent(
+     backend='gemini',
+     model='gemini-2.5-flash',
+     api_key='your-api-key'
+ )
+
+ # Auto-detect - Universal compatibility
+ agent = MemAgent(auto_detect_backend=True)
+ print(f"Using: {agent.llm.get_backend_info()['name']}")
+ ```
+
  ### Multi-User Conversations
 
  ```python
@@ -378,16 +429,21 @@ Mem-LLM works with **ALL Ollama models**, including:
  ```
  mem-llm/
  ├── mem_llm/
- │   ├── mem_agent.py          # Main agent class
- │   ├── memory_manager.py     # JSON memory backend
- │   ├── memory_db.py          # SQL memory backend
- │   ├── llm_client.py         # Ollama API client
- │   ├── knowledge_loader.py   # Knowledge base system
- │   ├── dynamic_prompt.py     # Context-aware prompts
- │   ├── memory_tools.py       # Memory management tools
- │   ├── config_manager.py     # Configuration handler
- │   └── cli.py                # Command-line interface
- └── examples/                 # Usage examples
+ │   ├── mem_agent.py           # Main agent class (multi-backend)
+ │   ├── base_llm_client.py     # Abstract LLM interface
+ │   ├── llm_client_factory.py  # Backend factory pattern
+ │   ├── clients/               # LLM backend implementations
+ │   │   ├── ollama_client.py   # Ollama integration
+ │   │   ├── lmstudio_client.py # LM Studio integration
+ │   │   └── gemini_client.py   # Google Gemini integration
+ │   ├── memory_manager.py      # JSON memory backend
+ │   ├── memory_db.py           # SQL memory backend
+ │   ├── knowledge_loader.py    # Knowledge base system
+ │   ├── dynamic_prompt.py      # Context-aware prompts
+ │   ├── memory_tools.py        # Memory management tools
+ │   ├── config_manager.py      # Configuration handler
+ │   └── cli.py                 # Command-line interface
+ └── examples/                  # Usage examples (14 total)
  ```
 
  ## 🔥 Advanced Features
@@ -429,10 +485,12 @@ stats = agent.get_memory_stats()
  ## 📦 Project Structure
 
  ### Core Components
- - **MemAgent**: Main interface for building AI assistants
+ - **MemAgent**: Main interface for building AI assistants (multi-backend support)
+ - **LLMClientFactory**: Factory pattern for backend creation
+ - **BaseLLMClient**: Abstract interface for all LLM backends
+ - **OllamaClient / LMStudioClient / GeminiClient**: Backend implementations
  - **MemoryManager**: JSON-based memory storage (simple)
  - **SQLMemoryManager**: SQLite-based storage (advanced)
- - **OllamaClient**: LLM communication handler
  - **KnowledgeLoader**: Knowledge base management
 
  ### Optional Features
@@ -456,14 +514,19 @@ The `examples/` directory contains ready-to-run demonstrations:
  8. **08_conversation_summarization.py** - Token compression with auto-summary (v1.2.0+)
  9. **09_data_export_import.py** - Multi-format export/import demo (v1.2.0+)
  10. **10_database_connection_test.py** - Enterprise PostgreSQL/MongoDB migration (v1.2.0+)
+ 11. **11_lmstudio_example.py** - Using LM Studio backend (v1.3.0+)
+ 12. **12_gemini_example.py** - Using Google Gemini API (v1.3.0+)
+ 13. **13_multi_backend_comparison.py** - Compare different backends (v1.3.0+)
+ 14. **14_auto_detect_backend.py** - Auto-detection feature demo (v1.3.0+)
 
  ## 📊 Project Status
 
- - **Version**: 1.2.0
+ - **Version**: 1.3.0
  - **Status**: Production Ready
- - **Last Updated**: October 21, 2025
- - **Test Coverage**: 16/16 automated tests (100% success rate)
+ - **Last Updated**: October 31, 2025
+ - **Test Coverage**: 50+ automated tests (100% success rate)
  - **Performance**: Thread-safe operations, <1ms search latency
+ - **Backends**: Ollama, LM Studio, Google Gemini
  - **Databases**: SQLite, PostgreSQL, MongoDB, In-Memory
 
  ## 📈 Roadmap
@@ -475,10 +538,14 @@ The `examples/` directory contains ready-to-run demonstrations:
  - [x] ~~Conversation Summarization~~ (v1.2.0)
  - [x] ~~Multi-Database Export/Import~~ (v1.2.0)
  - [x] ~~In-Memory Database~~ (v1.2.0)
+ - [x] ~~Multi-Backend Support (Ollama, LM Studio, Gemini)~~ (v1.3.0)
+ - [x] ~~Auto-Detection~~ (v1.3.0)
+ - [x] ~~Factory Pattern Architecture~~ (v1.3.0)
+ - [ ] OpenAI & Claude backends
+ - [ ] Streaming support
  - [ ] Web UI dashboard
  - [ ] REST API server
  - [ ] Vector database integration
- - [ ] Advanced analytics dashboard
 
  ## 📄 License
 
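The README above says auto-detection "Checks Ollama → LM Studio → other local services". A minimal sketch of that probing idea, assuming the services' documented defaults (Ollama on port 11434 with its `/api/tags` endpoint, LM Studio on port 1234 with its OpenAI-compatible `/v1/models` endpoint); the package's actual detection logic in `llm_client_factory.py` may differ:

```python
import requests

# Probe order mirrors the README: Ollama first, then LM Studio.
# Ports and endpoints are the services' documented defaults; the
# "other local services" the README mentions are omitted here.
CANDIDATES = [
    ("ollama", "http://localhost:11434/api/tags"),
    ("lmstudio", "http://localhost:1234/v1/models"),
]

def detect_backend(timeout: float = 1.0):
    """Return the name of the first local LLM service that answers."""
    for name, url in CANDIDATES:
        try:
            if requests.get(url, timeout=timeout).ok:
                return name
        except requests.RequestException:
            continue  # not running; try the next candidate
    return None  # nothing local found; caller could fall back to a cloud backend

print(detect_backend())  # e.g. 'ollama', or None
```
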
@@ -4,9 +4,9 @@
  [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 
- **Memory-enabled AI assistant with local LLM support**
+ **Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini)**
 
- Mem-LLM is a powerful Python library that brings persistent memory capabilities to local Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and work completely offline with Ollama.
+ Mem-LLM is a powerful Python library that brings persistent memory capabilities to Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and choose between local (Ollama, LM Studio) or cloud (Gemini) backends.
 
  ## 🔗 Links
 
@@ -15,29 +15,31 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
  - **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
  - **Documentation**: See examples/ directory
 
- ## 🆕 What's New in v1.2.0
+ ## 🆕 What's New in v1.3.0
 
- - **Conversation Summarization**: Automatic conversation compression (~40-60% token reduction)
- - 📤 **Data Export/Import**: JSON, CSV, SQLite, PostgreSQL, MongoDB support
- - 🗄️ **Multi-Database**: Enterprise-ready PostgreSQL & MongoDB integration
- - **In-Memory DB**: Use `:memory:` for temporary operations
- - **Cleaner Logs**: Default WARNING level for production-ready output
- - **Bug Fixes**: Database path handling, organized SQLite files
+ - 🔌 **Multi-Backend Support**: Choose between Ollama (local), LM Studio (local), or Google Gemini (cloud)
+ - 🏗️ **Factory Pattern**: Clean, extensible architecture for easy backend switching
+ - 🔍 **Auto-Detection**: Automatically finds and uses available local LLM services
+ - **Unified API**: Same code works across all backends - just change one parameter
+ - 📚 **New Examples**: 4 additional examples showing multi-backend usage
+ - 🎯 **Backward Compatible**: All v1.2.0 code still works without changes
 
- [See full changelog](CHANGELOG.md#120---2025-10-21)
+ [See full changelog](CHANGELOG.md#130---2025-10-31)
 
  ## ✨ Key Features
 
+ - 🔌 **Multi-Backend Support** (v1.3.0+) - Choose Ollama, LM Studio, or Gemini with unified API
+ - 🔍 **Auto-Detection** (v1.3.0+) - Automatically find and use available LLM services
  - 🧠 **Persistent Memory** - Remembers conversations across sessions
- - 🤖 **Universal Ollama Support** - Works with ALL Ollama models (Qwen3, DeepSeek, Llama3, Granite, etc.)
+ - 🤖 **Universal Model Support** - Works with 100+ Ollama models, LM Studio models, and Gemini
  - 💾 **Dual Storage Modes** - JSON (simple) or SQLite (advanced) memory backends
  - 📚 **Knowledge Base** - Built-in FAQ/support system with categorized entries
  - 🎯 **Dynamic Prompts** - Context-aware system prompts that adapt to active features
  - 👥 **Multi-User Support** - Separate memory spaces for different users
  - 🔧 **Memory Tools** - Search, export, and manage stored memories
  - 🎨 **Flexible Configuration** - Personal or business usage modes
- - 📊 **Production Ready** - Comprehensive test suite with 34+ automated tests
- - 🔒 **100% Local & Private** - No cloud dependencies, your data stays yours
+ - 📊 **Production Ready** - Comprehensive test suite with 50+ automated tests
+ - 🔒 **Privacy Options** - 100% local (Ollama/LM Studio) or cloud (Gemini)
  - 🛡️ **Prompt Injection Protection** (v1.1.0+) - Advanced security against prompt attacks (opt-in)
  - ⚡ **High Performance** (v1.1.0+) - Thread-safe operations, 15K+ msg/s throughput
  - 🔄 **Retry Logic** (v1.1.0+) - Automatic exponential backoff for network errors
@@ -75,8 +77,9 @@ pip install -U mem-llm
 
  ### Prerequisites
 
- Install and start [Ollama](https://ollama.ai):
+ **Choose one of the following LLM backends:**
 
+ #### Option 1: Ollama (Local, Privacy-First)
  ```bash
  # Install Ollama (visit https://ollama.ai)
  # Then pull a model
@@ -86,15 +89,38 @@ ollama pull granite4:tiny-h
  ollama serve
  ```
 
+ #### Option 2: LM Studio (Local, GUI-Based)
+ ```bash
+ # 1. Download and install LM Studio: https://lmstudio.ai
+ # 2. Download a model from the UI
+ # 3. Start the local server (default port: 1234)
+ ```
+
+ #### Option 3: Google Gemini (Cloud, Powerful)
+ ```bash
+ # Get an API key from: https://makersuite.google.com/app/apikey
+ # Set environment variable
+ export GEMINI_API_KEY="your-api-key-here"
+ ```
+
  ### Basic Usage
 
  ```python
  from mem_llm import MemAgent
 
- # Create an agent
+ # Option 1: Use Ollama (default)
  agent = MemAgent(model="granite4:tiny-h")
 
- # Set user and chat
+ # Option 2: Use LM Studio
+ agent = MemAgent(backend='lmstudio', model='local-model')
+
+ # Option 3: Use Gemini
+ agent = MemAgent(backend='gemini', model='gemini-2.5-flash', api_key='your-key')
+
+ # Option 4: Auto-detect available backend
+ agent = MemAgent(auto_detect_backend=True)
+
+ # Set user and chat (same for all backends!)
  agent.set_user("alice")
  response = agent.chat("My name is Alice and I love Python!")
  print(response)
@@ -104,10 +130,34 @@ response = agent.chat("What's my name and what do I love?")
  print(response) # Agent remembers: "Your name is Alice and you love Python!"
  ```
 
- That's it! Just 5 lines of code to get started.
+ That's it! Just 5 lines of code to get started with any backend.
 
  ## 📖 Usage Examples
 
+ ### Multi-Backend Examples (v1.3.0+)
+
+ ```python
+ from mem_llm import MemAgent
+
+ # LM Studio - Fast local inference
+ agent = MemAgent(
+     backend='lmstudio',
+     model='local-model',
+     base_url='http://localhost:1234'
+ )
+
+ # Google Gemini - Cloud power
+ agent = MemAgent(
+     backend='gemini',
+     model='gemini-2.5-flash',
+     api_key='your-api-key'
+ )
+
+ # Auto-detect - Universal compatibility
+ agent = MemAgent(auto_detect_backend=True)
+ print(f"Using: {agent.llm.get_backend_info()['name']}")
+ ```
+
  ### Multi-User Conversations
 
  ```python
@@ -324,16 +374,21 @@ Mem-LLM works with **ALL Ollama models**, including:
  ```
  mem-llm/
  ├── mem_llm/
- │   ├── mem_agent.py          # Main agent class
- │   ├── memory_manager.py     # JSON memory backend
- │   ├── memory_db.py          # SQL memory backend
- │   ├── llm_client.py         # Ollama API client
- │   ├── knowledge_loader.py   # Knowledge base system
- │   ├── dynamic_prompt.py     # Context-aware prompts
- │   ├── memory_tools.py       # Memory management tools
- │   ├── config_manager.py     # Configuration handler
- │   └── cli.py                # Command-line interface
- └── examples/                 # Usage examples
+ │   ├── mem_agent.py           # Main agent class (multi-backend)
+ │   ├── base_llm_client.py     # Abstract LLM interface
+ │   ├── llm_client_factory.py  # Backend factory pattern
+ │   ├── clients/               # LLM backend implementations
+ │   │   ├── ollama_client.py   # Ollama integration
+ │   │   ├── lmstudio_client.py # LM Studio integration
+ │   │   └── gemini_client.py   # Google Gemini integration
+ │   ├── memory_manager.py      # JSON memory backend
+ │   ├── memory_db.py           # SQL memory backend
+ │   ├── knowledge_loader.py    # Knowledge base system
+ │   ├── dynamic_prompt.py      # Context-aware prompts
+ │   ├── memory_tools.py        # Memory management tools
+ │   ├── config_manager.py      # Configuration handler
+ │   └── cli.py                 # Command-line interface
+ └── examples/                  # Usage examples (14 total)
  ```
 
  ## 🔥 Advanced Features
@@ -375,10 +430,12 @@ stats = agent.get_memory_stats()
  ## 📦 Project Structure
 
  ### Core Components
- - **MemAgent**: Main interface for building AI assistants
+ - **MemAgent**: Main interface for building AI assistants (multi-backend support)
+ - **LLMClientFactory**: Factory pattern for backend creation
+ - **BaseLLMClient**: Abstract interface for all LLM backends
+ - **OllamaClient / LMStudioClient / GeminiClient**: Backend implementations
  - **MemoryManager**: JSON-based memory storage (simple)
  - **SQLMemoryManager**: SQLite-based storage (advanced)
- - **OllamaClient**: LLM communication handler
  - **KnowledgeLoader**: Knowledge base management
 
  ### Optional Features
@@ -402,14 +459,19 @@ The `examples/` directory contains ready-to-run demonstrations:
  8. **08_conversation_summarization.py** - Token compression with auto-summary (v1.2.0+)
  9. **09_data_export_import.py** - Multi-format export/import demo (v1.2.0+)
  10. **10_database_connection_test.py** - Enterprise PostgreSQL/MongoDB migration (v1.2.0+)
+ 11. **11_lmstudio_example.py** - Using LM Studio backend (v1.3.0+)
+ 12. **12_gemini_example.py** - Using Google Gemini API (v1.3.0+)
+ 13. **13_multi_backend_comparison.py** - Compare different backends (v1.3.0+)
+ 14. **14_auto_detect_backend.py** - Auto-detection feature demo (v1.3.0+)
 
  ## 📊 Project Status
 
- - **Version**: 1.2.0
+ - **Version**: 1.3.0
  - **Status**: Production Ready
- - **Last Updated**: October 21, 2025
- - **Test Coverage**: 16/16 automated tests (100% success rate)
+ - **Last Updated**: October 31, 2025
+ - **Test Coverage**: 50+ automated tests (100% success rate)
  - **Performance**: Thread-safe operations, <1ms search latency
+ - **Backends**: Ollama, LM Studio, Google Gemini
  - **Databases**: SQLite, PostgreSQL, MongoDB, In-Memory
 
  ## 📈 Roadmap
@@ -421,10 +483,14 @@ The `examples/` directory contains ready-to-run demonstrations:
  - [x] ~~Conversation Summarization~~ (v1.2.0)
  - [x] ~~Multi-Database Export/Import~~ (v1.2.0)
  - [x] ~~In-Memory Database~~ (v1.2.0)
+ - [x] ~~Multi-Backend Support (Ollama, LM Studio, Gemini)~~ (v1.3.0)
+ - [x] ~~Auto-Detection~~ (v1.3.0)
+ - [x] ~~Factory Pattern Architecture~~ (v1.3.0)
+ - [ ] OpenAI & Claude backends
+ - [ ] Streaming support
  - [ ] Web UI dashboard
  - [ ] REST API server
  - [ ] Vector database integration
- - [ ] Advanced analytics dashboard
 
  ## 📄 License
 
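Both README copies reference `13_multi_backend_comparison.py` for comparing backend performance. A hedged sketch of what such a comparison loop could look like, built only from the constructor parameters shown in the README's own examples; model names, timings, and which backends are reachable all depend on the local setup:

```python
import time
from mem_llm import MemAgent

# Backend configurations taken from the README examples above;
# adjust to whatever is actually installed and running locally.
configs = [
    {"backend": "ollama", "model": "granite4:tiny-h"},
    {"backend": "lmstudio", "model": "local-model"},
    {"backend": "gemini", "model": "gemini-2.5-flash", "api_key": "your-key"},
]

for cfg in configs:
    try:
        agent = MemAgent(**cfg)
        agent.set_user("bench")
        start = time.perf_counter()
        reply = agent.chat("Summarize the benefits of persistent memory in one sentence.")
        elapsed = time.perf_counter() - start
        print(f"{cfg['backend']:>8}: {elapsed:.2f}s  {reply[:60]}...")
    except Exception as exc:  # backend not running, or API key missing
        print(f"{cfg['backend']:>8}: unavailable ({exc})")
```
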
@@ -5,7 +5,13 @@ AI library that remembers user interactions
 
  from .mem_agent import MemAgent
  from .memory_manager import MemoryManager
- from .llm_client import OllamaClient
+ from .llm_client import OllamaClient  # Backward compatibility
+ from .base_llm_client import BaseLLMClient
+ from .llm_client_factory import LLMClientFactory
+
+ # New multi-backend support (v1.3.0+)
+ from .clients import OllamaClient as OllamaClientNew
+ from .clients import LMStudioClient, GeminiClient
 
  # Tools (optional)
  try:
@@ -57,9 +63,12 @@ try:
  except ImportError:
      __all_export_import__ = []
 
- __version__ = "1.2.0"
+ __version__ = "1.3.1"
  __author__ = "C. Emre Karataş"
 
+ # Multi-backend LLM support (v1.3.0+)
+ __all_llm_backends__ = ["BaseLLMClient", "LLMClientFactory", "OllamaClientNew", "LMStudioClient", "GeminiClient"]
+
  # CLI
  try:
      from .cli import cli
@@ -71,4 +80,4 @@ __all__ = [
      "MemAgent",
      "MemoryManager",
      "OllamaClient",
- ] + __all_tools__ + __all_pro__ + __all_cli__ + __all_security__ + __all_enhanced__ + __all_summarizer__ + __all_export_import__
+ ] + __all_llm_backends__ + __all_tools__ + __all_pro__ + __all_cli__ + __all_security__ + __all_enhanced__ + __all_summarizer__ + __all_export_import__
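
Per the `__init__.py` diff above, the legacy import path and the new multi-backend symbols should both resolve after upgrading. A quick smoke test using only the exports visible in the diff:

```python
# Smoke test for the exports shown in the __init__.py diff above.
import mem_llm

assert mem_llm.__version__ == "1.3.1"

# Legacy import path, kept for backward compatibility with v1.2.0 code
from mem_llm import OllamaClient

# New multi-backend symbols added in v1.3.0
from mem_llm import BaseLLMClient, LLMClientFactory, LMStudioClient, GeminiClient

print("All v1.3.1 exports resolved")
```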