mem-llm 1.3.0__tar.gz → 1.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mem-llm has been flagged for review; see the registry's report page for details.
Files changed (51)
  1. {mem_llm-1.3.0 → mem_llm-1.3.1}/CHANGELOG.md +7 -0
  2. {mem_llm-1.3.0/mem_llm.egg-info → mem_llm-1.3.1}/PKG-INFO +100 -34
  3. {mem_llm-1.3.0 → mem_llm-1.3.1}/README.md +99 -33
  4. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/__init__.py +1 -1
  5. {mem_llm-1.3.0 → mem_llm-1.3.1/mem_llm.egg-info}/PKG-INFO +100 -34
  6. {mem_llm-1.3.0 → mem_llm-1.3.1}/pyproject.toml +1 -1
  7. {mem_llm-1.3.0 → mem_llm-1.3.1}/MANIFEST.in +0 -0
  8. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/base_llm_client.py +0 -0
  9. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/cli.py +0 -0
  10. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/clients/__init__.py +0 -0
  11. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/clients/gemini_client.py +0 -0
  12. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/clients/lmstudio_client.py +0 -0
  13. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/clients/ollama_client.py +0 -0
  14. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/config.yaml.example +0 -0
  15. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/config_from_docs.py +0 -0
  16. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/config_manager.py +0 -0
  17. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/conversation_summarizer.py +0 -0
  18. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/data_export_import.py +0 -0
  19. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/dynamic_prompt.py +0 -0
  20. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/knowledge_loader.py +0 -0
  21. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/llm_client.py +0 -0
  22. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/llm_client_factory.py +0 -0
  23. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/logger.py +0 -0
  24. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/mem_agent.py +0 -0
  25. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/memory_db.py +0 -0
  26. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/memory_manager.py +0 -0
  27. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/memory_tools.py +0 -0
  28. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/prompt_security.py +0 -0
  29. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/retry_handler.py +0 -0
  30. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm/thread_safe_db.py +0 -0
  31. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm.egg-info/SOURCES.txt +0 -0
  32. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm.egg-info/dependency_links.txt +0 -0
  33. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm.egg-info/entry_points.txt +0 -0
  34. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm.egg-info/requires.txt +0 -0
  35. {mem_llm-1.3.0 → mem_llm-1.3.1}/mem_llm.egg-info/top_level.txt +0 -0
  36. {mem_llm-1.3.0 → mem_llm-1.3.1}/requirements-dev.txt +0 -0
  37. {mem_llm-1.3.0 → mem_llm-1.3.1}/requirements-optional.txt +0 -0
  38. {mem_llm-1.3.0 → mem_llm-1.3.1}/requirements.txt +0 -0
  39. {mem_llm-1.3.0 → mem_llm-1.3.1}/setup.cfg +0 -0
  40. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_advanced_coverage.py +0 -0
  41. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_backward_compatibility.py +0 -0
  42. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_conversation_summarizer.py +0 -0
  43. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_data_export_import.py +0 -0
  44. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_improvements.py +0 -0
  45. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_integration.py +0 -0
  46. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_llm_backends.py +0 -0
  47. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_llm_client.py +0 -0
  48. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_mem_agent.py +0 -0
  49. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_memory_manager.py +0 -0
  50. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_memory_tools.py +0 -0
  51. {mem_llm-1.3.0 → mem_llm-1.3.1}/tests/test_qwen3_model.py +0 -0
CHANGELOG.md

@@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.3.1] - 2025-10-31
+
+### 📝 Documentation
+
+- ✅ **README Update**: Fixed PyPI package README to show v1.3.0 features correctly
+- ✅ No code changes - all v1.3.0 functionality remains the same
+
 ## [1.3.0] - 2025-10-31
 
 ### 🎉 Major Features
PKG-INFO (moved from mem_llm-1.3.0/mem_llm.egg-info/PKG-INFO)

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mem-llm
-Version: 1.3.0
+Version: 1.3.1
 Summary: Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini) - Local and cloud ready
 Author-email: "C. Emre Karataş" <karatasqemre@gmail.com>
 License: MIT
@@ -59,9 +59,9 @@ Requires-Dist: pymongo>=4.6.0; extra == "all"
 [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
 [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 
-**Memory-enabled AI assistant with local LLM support**
+**Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini)**
 
-Mem-LLM is a powerful Python library that brings persistent memory capabilities to local Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and work completely offline with Ollama.
+Mem-LLM is a powerful Python library that brings persistent memory capabilities to Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and choose between local (Ollama, LM Studio) or cloud (Gemini) backends.
 
 ## 🔗 Links
 
@@ -70,29 +70,31 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
 - **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
 - **Documentation**: See examples/ directory
 
-## 🆕 What's New in v1.2.0
+## 🆕 What's New in v1.3.0
 
-- **Conversation Summarization**: Automatic conversation compression (~40-60% token reduction)
-- 📤 **Data Export/Import**: JSON, CSV, SQLite, PostgreSQL, MongoDB support
-- 🗄️ **Multi-Database**: Enterprise-ready PostgreSQL & MongoDB integration
-- **In-Memory DB**: Use `:memory:` for temporary operations
-- **Cleaner Logs**: Default WARNING level for production-ready output
-- **Bug Fixes**: Database path handling, organized SQLite files
+- 🔌 **Multi-Backend Support**: Choose between Ollama (local), LM Studio (local), or Google Gemini (cloud)
+- 🏗️ **Factory Pattern**: Clean, extensible architecture for easy backend switching
+- 🔍 **Auto-Detection**: Automatically finds and uses available local LLM services
+- **Unified API**: Same code works across all backends - just change one parameter
+- 📚 **New Examples**: 4 additional examples showing multi-backend usage
+- 🎯 **Backward Compatible**: All v1.2.0 code still works without changes
 
-[See full changelog](CHANGELOG.md#120---2025-10-21)
+[See full changelog](CHANGELOG.md#130---2025-10-31)
 
 ## ✨ Key Features
 
+- 🔌 **Multi-Backend Support** (v1.3.0+) - Choose Ollama, LM Studio, or Gemini with unified API
+- 🔍 **Auto-Detection** (v1.3.0+) - Automatically find and use available LLM services
 - 🧠 **Persistent Memory** - Remembers conversations across sessions
-- 🤖 **Universal Ollama Support** - Works with ALL Ollama models (Qwen3, DeepSeek, Llama3, Granite, etc.)
+- 🤖 **Universal Model Support** - Works with 100+ Ollama models, LM Studio models, and Gemini
 - 💾 **Dual Storage Modes** - JSON (simple) or SQLite (advanced) memory backends
 - 📚 **Knowledge Base** - Built-in FAQ/support system with categorized entries
 - 🎯 **Dynamic Prompts** - Context-aware system prompts that adapt to active features
 - 👥 **Multi-User Support** - Separate memory spaces for different users
 - 🔧 **Memory Tools** - Search, export, and manage stored memories
 - 🎨 **Flexible Configuration** - Personal or business usage modes
-- 📊 **Production Ready** - Comprehensive test suite with 34+ automated tests
-- 🔒 **100% Local & Private** - No cloud dependencies, your data stays yours
+- 📊 **Production Ready** - Comprehensive test suite with 50+ automated tests
+- 🔒 **Privacy Options** - 100% local (Ollama/LM Studio) or cloud (Gemini)
 - 🛡️ **Prompt Injection Protection** (v1.1.0+) - Advanced security against prompt attacks (opt-in)
 - ⚡ **High Performance** (v1.1.0+) - Thread-safe operations, 15K+ msg/s throughput
 - 🔄 **Retry Logic** (v1.1.0+) - Automatic exponential backoff for network errors
@@ -130,8 +132,9 @@ pip install -U mem-llm
 
 ### Prerequisites
 
-Install and start [Ollama](https://ollama.ai):
+**Choose one of the following LLM backends:**
 
+#### Option 1: Ollama (Local, Privacy-First)
 ```bash
 # Install Ollama (visit https://ollama.ai)
 # Then pull a model
@@ -141,15 +144,38 @@ ollama pull granite4:tiny-h
 ollama serve
 ```
 
+#### Option 2: LM Studio (Local, GUI-Based)
+```bash
+# 1. Download and install LM Studio: https://lmstudio.ai
+# 2. Download a model from the UI
+# 3. Start the local server (default port: 1234)
+```
+
+#### Option 3: Google Gemini (Cloud, Powerful)
+```bash
+# Get API key from: https://makersuite.google.com/app/apikey
+# Set environment variable
+export GEMINI_API_KEY="your-api-key-here"
+```
+
 ### Basic Usage
 
 ```python
 from mem_llm import MemAgent
 
-# Create an agent
+# Option 1: Use Ollama (default)
 agent = MemAgent(model="granite4:tiny-h")
 
-# Set user and chat
+# Option 2: Use LM Studio
+agent = MemAgent(backend='lmstudio', model='local-model')
+
+# Option 3: Use Gemini
+agent = MemAgent(backend='gemini', model='gemini-2.5-flash', api_key='your-key')
+
+# Option 4: Auto-detect available backend
+agent = MemAgent(auto_detect_backend=True)
+
+# Set user and chat (same for all backends!)
 agent.set_user("alice")
 response = agent.chat("My name is Alice and I love Python!")
 print(response)
@@ -159,10 +185,34 @@ response = agent.chat("What's my name and what do I love?")
 print(response) # Agent remembers: "Your name is Alice and you love Python!"
 ```
 
-That's it! Just 5 lines of code to get started.
+That's it! Just 5 lines of code to get started with any backend.
 
 ## 📖 Usage Examples
 
+### Multi-Backend Examples (v1.3.0+)
+
+```python
+from mem_llm import MemAgent
+
+# LM Studio - Fast local inference
+agent = MemAgent(
+    backend='lmstudio',
+    model='local-model',
+    base_url='http://localhost:1234'
+)
+
+# Google Gemini - Cloud power
+agent = MemAgent(
+    backend='gemini',
+    model='gemini-2.5-flash',
+    api_key='your-api-key'
+)
+
+# Auto-detect - Universal compatibility
+agent = MemAgent(auto_detect_backend=True)
+print(f"Using: {agent.llm.get_backend_info()['name']}")
+```
+
 ### Multi-User Conversations
 
 ```python
@@ -379,16 +429,21 @@ Mem-LLM works with **ALL Ollama models**, including:
 ```
 mem-llm/
 ├── mem_llm/
-│   ├── mem_agent.py         # Main agent class
-│   ├── memory_manager.py    # JSON memory backend
-│   ├── memory_db.py         # SQL memory backend
-│   ├── llm_client.py        # Ollama API client
-│   ├── knowledge_loader.py  # Knowledge base system
-│   ├── dynamic_prompt.py    # Context-aware prompts
-│   ├── memory_tools.py      # Memory management tools
-│   ├── config_manager.py    # Configuration handler
-│   └── cli.py               # Command-line interface
-└── examples/                # Usage examples
+│   ├── mem_agent.py            # Main agent class (multi-backend)
+│   ├── base_llm_client.py      # Abstract LLM interface
+│   ├── llm_client_factory.py   # Backend factory pattern
+│   ├── clients/                # LLM backend implementations
+│   │   ├── ollama_client.py    # Ollama integration
+│   │   ├── lmstudio_client.py  # LM Studio integration
+│   │   └── gemini_client.py    # Google Gemini integration
+│   ├── memory_manager.py       # JSON memory backend
+│   ├── memory_db.py            # SQL memory backend
+│   ├── knowledge_loader.py     # Knowledge base system
+│   ├── dynamic_prompt.py       # Context-aware prompts
+│   ├── memory_tools.py         # Memory management tools
+│   ├── config_manager.py       # Configuration handler
+│   └── cli.py                  # Command-line interface
+└── examples/                   # Usage examples (14 total)
 ```
 
 ## 🔥 Advanced Features
@@ -430,10 +485,12 @@ stats = agent.get_memory_stats()
 ## 📦 Project Structure
 
 ### Core Components
-- **MemAgent**: Main interface for building AI assistants
+- **MemAgent**: Main interface for building AI assistants (multi-backend support)
+- **LLMClientFactory**: Factory pattern for backend creation
+- **BaseLLMClient**: Abstract interface for all LLM backends
+- **OllamaClient / LMStudioClient / GeminiClient**: Backend implementations
 - **MemoryManager**: JSON-based memory storage (simple)
 - **SQLMemoryManager**: SQLite-based storage (advanced)
-- **OllamaClient**: LLM communication handler
 - **KnowledgeLoader**: Knowledge base management
 
 ### Optional Features
@@ -457,14 +514,19 @@ The `examples/` directory contains ready-to-run demonstrations:
 8. **08_conversation_summarization.py** - Token compression with auto-summary (v1.2.0+)
 9. **09_data_export_import.py** - Multi-format export/import demo (v1.2.0+)
 10. **10_database_connection_test.py** - Enterprise PostgreSQL/MongoDB migration (v1.2.0+)
+11. **11_lmstudio_example.py** - Using LM Studio backend (v1.3.0+)
+12. **12_gemini_example.py** - Using Google Gemini API (v1.3.0+)
+13. **13_multi_backend_comparison.py** - Compare different backends (v1.3.0+)
+14. **14_auto_detect_backend.py** - Auto-detection feature demo (v1.3.0+)
 
 ## 📊 Project Status
 
-- **Version**: 1.2.0
+- **Version**: 1.3.0
 - **Status**: Production Ready
-- **Last Updated**: October 21, 2025
-- **Test Coverage**: 16/16 automated tests (100% success rate)
+- **Last Updated**: October 31, 2025
+- **Test Coverage**: 50+ automated tests (100% success rate)
 - **Performance**: Thread-safe operations, <1ms search latency
+- **Backends**: Ollama, LM Studio, Google Gemini
 - **Databases**: SQLite, PostgreSQL, MongoDB, In-Memory
 
 ## 📈 Roadmap
@@ -476,10 +538,14 @@ The `examples/` directory contains ready-to-run demonstrations:
 - [x] ~~Conversation Summarization~~ (v1.2.0)
 - [x] ~~Multi-Database Export/Import~~ (v1.2.0)
 - [x] ~~In-Memory Database~~ (v1.2.0)
+- [x] ~~Multi-Backend Support (Ollama, LM Studio, Gemini)~~ (v1.3.0)
+- [x] ~~Auto-Detection~~ (v1.3.0)
+- [x] ~~Factory Pattern Architecture~~ (v1.3.0)
+- [ ] OpenAI & Claude backends
+- [ ] Streaming support
 - [ ] Web UI dashboard
 - [ ] REST API server
 - [ ] Vector database integration
-- [ ] Advanced analytics dashboard
 
 ## 📄 License
 
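The README above names three architecture pieces — `BaseLLMClient`, `LLMClientFactory`, and the per-backend clients — without showing how they fit together. The following is a minimal sketch of such a factory pattern; the class names come from the diff, but every signature and method body here is an assumption for illustration, not mem-llm's actual implementation (a real factory would also register `GeminiClient`).

```python
from abc import ABC, abstractmethod


class BaseLLMClient(ABC):
    """Abstract backend interface (class name taken from the diff above)."""

    @abstractmethod
    def chat(self, prompt: str) -> str: ...

    @abstractmethod
    def get_backend_info(self) -> dict: ...


class OllamaClient(BaseLLMClient):
    def __init__(self, model: str, base_url: str = "http://localhost:11434"):
        self.model, self.base_url = model, base_url

    def chat(self, prompt: str) -> str:
        # A real client would POST to the Ollama HTTP API here.
        return f"[ollama/{self.model}] echo: {prompt}"

    def get_backend_info(self) -> dict:
        return {"name": "ollama", "model": self.model, "base_url": self.base_url}


class LMStudioClient(BaseLLMClient):
    def __init__(self, model: str, base_url: str = "http://localhost:1234"):
        self.model, self.base_url = model, base_url

    def chat(self, prompt: str) -> str:
        # A real client would call LM Studio's OpenAI-compatible endpoint.
        return f"[lmstudio/{self.model}] echo: {prompt}"

    def get_backend_info(self) -> dict:
        return {"name": "lmstudio", "model": self.model, "base_url": self.base_url}


class LLMClientFactory:
    """Resolve a backend string to a client instance, as the unified API implies."""

    _registry = {"ollama": OllamaClient, "lmstudio": LMStudioClient}

    @classmethod
    def create(cls, backend: str, **kwargs) -> BaseLLMClient:
        try:
            return cls._registry[backend](**kwargs)
        except KeyError:
            raise ValueError(f"unknown backend: {backend!r}") from None


client = LLMClientFactory.create("lmstudio", model="local-model")
print(client.get_backend_info())  # {'name': 'lmstudio', ...}
```

Under this design, switching backends really is a one-parameter change, which is what the "Unified API" bullet claims.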
README.md

(The README.md changes are identical in content to the PKG-INFO diff above, minus the metadata-header hunk; only the hunk offsets differ, because the same text starts 55 lines earlier in the README than in PKG-INFO.)
mem_llm/__init__.py

@@ -63,7 +63,7 @@ try:
 except ImportError:
     __all_export_import__ = []
 
-__version__ = "1.3.0"
+__version__ = "1.3.1"
 __author__ = "C. Emre Karataş"
 
 # Multi-backend LLM support (v1.3.0+)
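`MemAgent(auto_detect_backend=True)` implies some probe of local services, but this diff does not show how mem-llm detects them. A plausible sketch is a short TCP probe of the default local ports — 1234 is LM Studio's documented default per the README, and 11434 is Ollama's standard port; the probe order and function name are illustrative assumptions:

```python
import socket
from typing import Optional

# Default local endpoints: port 1234 comes from the README above;
# 11434 is Ollama's well-known default.
CANDIDATES = [("ollama", "localhost", 11434), ("lmstudio", "localhost", 1234)]


def detect_backend(timeout: float = 0.25) -> Optional[str]:
    """Return the first backend whose port accepts a TCP connection, else None."""
    for name, host, port in CANDIDATES:
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return name
        except OSError:
            continue
    return None


print(detect_backend() or "no local LLM service found")
```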
mem_llm.egg-info/PKG-INFO

(Byte-for-byte the same changes as the PKG-INFO diff above; the built egg-info metadata mirrors the package README.)
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "mem-llm"
-version = "1.3.0"
+version = "1.3.1"
 description = "Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini) - Local and cloud ready"
 readme = "README.md"
 requires-python = ">=3.8"
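Since 1.3.1 only touches documentation and version strings, upgrading and verifying the release is trivial. A quick check of the two version markers this diff changes, assuming mem-llm 1.3.1 is installed:

```python
# After: pip install -U mem-llm
from importlib.metadata import version  # stdlib on Python 3.8+, matching requires-python

import mem_llm

print(mem_llm.__version__)  # "1.3.1", set in mem_llm/__init__.py above
print(version("mem-llm"))   # "1.3.1", read from the PKG-INFO metadata
```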
The remaining 45 files (items 7–51 in the list above) are unchanged between 1.3.0 and 1.3.1.