mem-llm 1.0.10__py3-none-any.whl → 1.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mem-llm might be problematic.

mem_llm/__init__.py CHANGED
@@ -24,7 +24,7 @@ try:
  except ImportError:
      __all_pro__ = []

- __version__ = "1.0.10"
+ __version__ = "1.0.11"
  __author__ = "C. Emre Karataş"

  # CLI
mem_llm-1.0.11.dist-info/METADATA ADDED
@@ -0,0 +1,455 @@
+ Metadata-Version: 2.2
+ Name: mem-llm
+ Version: 1.0.11
+ Summary: Memory-enabled AI assistant with local LLM support
+ Author-email: "C. Emre Karataş" <karatasqemre@gmail.com>
+ License: MIT
+ Project-URL: Homepage, https://github.com/emredeveloper/Mem-LLM
+ Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
+ Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
+ Keywords: llm,ai,memory,agent,chatbot,ollama,local
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: requests>=2.31.0
+ Requires-Dist: pyyaml>=6.0.1
+ Requires-Dist: click>=8.1.0
+ Provides-Extra: dev
+ Requires-Dist: pytest>=7.4.0; extra == "dev"
+ Requires-Dist: pytest-cov>=4.1.0; extra == "dev"
+ Requires-Dist: black>=23.7.0; extra == "dev"
+ Requires-Dist: flake8>=6.1.0; extra == "dev"
+ Provides-Extra: web
+ Requires-Dist: flask>=3.0.0; extra == "web"
+ Requires-Dist: flask-cors>=4.0.0; extra == "web"
+ Provides-Extra: api
+ Requires-Dist: fastapi>=0.104.0; extra == "api"
+ Requires-Dist: uvicorn>=0.24.0; extra == "api"
+
+ # 🧠 Mem-LLM
+
+ [![PyPI version](https://badge.fury.io/py/mem-llm.svg)](https://badge.fury.io/py/mem-llm)
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+
+ **Memory-enabled AI assistant with local LLM support**
+
+ Mem-LLM is a powerful Python library that brings persistent memory capabilities to local Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and work completely offline with Ollama.
+
+ ## ✨ Key Features
+
+ - 🧠 **Persistent Memory** - Remembers conversations across sessions
+ - 🤖 **Universal Ollama Support** - Works with ALL Ollama models (Qwen3, DeepSeek, Llama3, Granite, etc.)
+ - 💾 **Dual Storage Modes** - JSON (simple) or SQLite (advanced) memory backends
+ - 📚 **Knowledge Base** - Built-in FAQ/support system with categorized entries
+ - 🎯 **Dynamic Prompts** - Context-aware system prompts that adapt to active features
+ - 👥 **Multi-User Support** - Separate memory spaces for different users
+ - 🔧 **Memory Tools** - Search, export, and manage stored memories
+ - 🎨 **Flexible Configuration** - Personal or business usage modes
+ - 📊 **Production Ready** - Comprehensive test suite with 34+ automated tests
+ - 🔒 **100% Local & Private** - No cloud dependencies, your data stays yours
+
+ ## 🚀 Quick Start
+
+ ### Installation
+
+ ```bash
+ pip install mem-llm
+ ```
+
+ ### Prerequisites
+
+ Install and start [Ollama](https://ollama.ai):
+
+ ```bash
+ # Install Ollama (visit https://ollama.ai)
+ # Then pull a model
+ ollama pull granite4:tiny-h
+
+ # Start Ollama service
+ ollama serve
+ ```
+
+ ### Basic Usage
+
+ ```python
+ from mem_llm import MemAgent
+
+ # Create an agent
+ agent = MemAgent(model="granite4:tiny-h")
+
+ # Set user and chat
+ agent.set_user("alice")
+ response = agent.chat("My name is Alice and I love Python!")
+ print(response)
+
+ # Memory persists across sessions
+ response = agent.chat("What's my name and what do I love?")
+ print(response)  # Agent remembers: "Your name is Alice and you love Python!"
+ ```
+
+ That's it! Just 5 lines of code to get started.
+
+ ## 📖 Usage Examples
+
+ ### Multi-User Conversations
+
+ ```python
+ from mem_llm import MemAgent
+
+ agent = MemAgent()
+
+ # User 1
+ agent.set_user("alice")
+ agent.chat("I'm a Python developer")
+
+ # User 2
+ agent.set_user("bob")
+ agent.chat("I'm a JavaScript developer")
+
+ # Each user has separate memory
+ agent.set_user("alice")
+ response = agent.chat("What do I do?")  # "You're a Python developer"
+ ```
+
+ ### Advanced Configuration
+
+ ```python
+ from mem_llm import MemAgent
+
+ # Use SQL database with knowledge base
+ agent = MemAgent(
+     model="qwen3:8b",
+     use_sql=True,
+     load_knowledge_base=True,
+     config_file="config.yaml"
+ )
+
+ # Add knowledge base entry
+ agent.add_kb_entry(
+     category="FAQ",
+     question="What are your hours?",
+     answer="We're open 9 AM - 5 PM EST, Monday-Friday"
+ )
+
+ # Agent will use KB to answer
+ response = agent.chat("When are you open?")
+ ```
+
+ ### Memory Tools
+
+ ```python
+ from mem_llm import MemAgent
+
+ agent = MemAgent(use_sql=True)
+ agent.set_user("alice")
+
+ # Chat with memory
+ agent.chat("I live in New York")
+ agent.chat("I work as a data scientist")
+
+ # Search memories
+ results = agent.search_memories("location")
+ print(results)  # Finds "New York" memory
+
+ # Export all data
+ data = agent.export_user_data()
+ print(f"Total memories: {len(data['memories'])}")
+
+ # Get statistics
+ stats = agent.get_memory_stats()
+ print(f"Users: {stats['total_users']}, Memories: {stats['total_memories']}")
+ ```
+
+ ### CLI Interface
+
+ ```bash
+ # Interactive chat
+ mem-llm chat
+
+ # With specific model
+ mem-llm chat --model llama3:8b
+
+ # Customer service mode
+ mem-llm customer-service
+
+ # Knowledge base management
+ mem-llm kb add --category "FAQ" --question "How to install?" --answer "Run: pip install mem-llm"
+ mem-llm kb list
+ mem-llm kb search "install"
+ ```
+
+ ## 🎯 Usage Modes
+
+ ### Personal Mode (Default)
+ - Single user with JSON storage
+ - Simple and lightweight
+ - Perfect for personal projects
+ - No configuration needed
+
+ ```python
+ agent = MemAgent()  # Automatically uses personal mode
+ ```
+
+ ### Business Mode
+ - Multi-user with SQL database
+ - Knowledge base support
+ - Advanced memory tools
+ - Requires configuration file
+
+ ```python
+ agent = MemAgent(
+     config_file="config.yaml",
+     use_sql=True,
+     load_knowledge_base=True
+ )
+ ```
+
+ ## 🔧 Configuration
+
+ Create a `config.yaml` file for advanced features:
+
+ ```yaml
+ # Usage mode: 'personal' or 'business'
+ usage_mode: business
+
+ # LLM settings
+ llm:
+   model: granite4:tiny-h
+   base_url: http://localhost:11434
+   temperature: 0.7
+   max_tokens: 2000
+
+ # Memory settings
+ memory:
+   type: sql  # or 'json'
+   db_path: ./data/memory.db
+
+ # Knowledge base
+ knowledge_base:
+   enabled: true
+   kb_path: ./data/knowledge_base.db
+
+ # Logging
+ logging:
+   level: INFO
+   file: logs/mem_llm.log
+ ```
+
+ ## 🧪 Supported Models
+
+ Mem-LLM works with **ALL Ollama models**, including:
+
+ - ✅ **Thinking Models**: Qwen3, DeepSeek, QwQ
+ - ✅ **Standard Models**: Llama3, Granite, Phi, Mistral
+ - ✅ **Specialized Models**: CodeLlama, Vicuna, Neural-Chat
+ - ✅ **Any Custom Model** in your Ollama library
+
+ ### Model Compatibility Features
+ - 🔄 Automatic thinking mode detection
+ - 🎯 Dynamic prompt adaptation
+ - ⚡ Token limit optimization (2000 tokens)
+ - 🔧 Automatic retry on empty responses (sketched below)
+
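The README does not spell out the retry behaviour, so here is a minimal sketch of the idea, assuming a direct call to Ollama's public REST endpoint (`POST /api/generate`). The function name `generate_with_retry` and its parameters are invented for illustration and are not part of mem-llm's API:

```python
# Sketch only: mem-llm's OllamaClient may implement this differently.
import requests

def generate_with_retry(prompt: str, model: str = "granite4:tiny-h",
                        base_url: str = "http://localhost:11434",
                        max_retries: int = 2) -> str:
    """Ask Ollama for a completion, retrying when the reply comes back empty."""
    for _ in range(max_retries + 1):
        resp = requests.post(
            f"{base_url}/api/generate",
            json={"model": model, "prompt": prompt, "stream": False},
            timeout=120,
        )
        resp.raise_for_status()
        text = resp.json().get("response", "").strip()
        if text:  # a non-empty answer ends the loop
            return text
    return ""  # every attempt came back empty
```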
262
+ ## 📚 Architecture
263
+
264
+ ```
265
+ mem-llm/
266
+ ├── mem_llm/
267
+ │ ├── mem_agent.py # Main agent class
268
+ │ ├── memory_manager.py # JSON memory backend
269
+ │ ├── memory_db.py # SQL memory backend
270
+ │ ├── llm_client.py # Ollama API client
271
+ │ ├── knowledge_loader.py # Knowledge base system
272
+ │ ├── dynamic_prompt.py # Context-aware prompts
273
+ │ ├── memory_tools.py # Memory management tools
274
+ │ ├── config_manager.py # Configuration handler
275
+ │ └── cli.py # Command-line interface
276
+ └── examples/ # Usage examples
277
+ ```
278
+
279
+ ## 🔥 Advanced Features
280
+
281
+ ### Dynamic Prompt System
282
+ Prevents hallucinations by only including instructions for enabled features:
283
+
284
+ ```python
285
+ agent = MemAgent(use_sql=True, load_knowledge_base=True)
286
+ # Agent automatically knows:
287
+ # ✅ Knowledge Base is available
288
+ # ✅ Memory tools are available
289
+ # ✅ SQL storage is active
290
+ ```
291
+
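The snippet above shows what the agent knows; as a rough illustration of how such a prompt can be assembled, here is a sketch under the assumption that instructions are emitted only for enabled features. The `build_system_prompt` helper and its wording are hypothetical, not mem-llm's actual `dynamic_prompt` internals:

```python
# Illustrative sketch; mem_llm/dynamic_prompt.py may differ in detail.
def build_system_prompt(use_sql: bool, has_kb: bool) -> str:
    parts = ["You are a helpful assistant with persistent memory."]
    if has_kb:   # only mention the KB when it is actually loaded
        parts.append("Answer FAQs from the knowledge base when possible.")
    if use_sql:  # memory tools exist only on the SQL backend
        parts.append("Memory search and export tools are available.")
    return "\n".join(parts)  # disabled features are never mentioned
```

Keeping disabled features out of the prompt is what stops the model from claiming tools it cannot use.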
+ ### Knowledge Base Categories
+ Organize knowledge by category:
+
+ ```python
+ agent.add_kb_entry(category="FAQ", question="...", answer="...")
+ agent.add_kb_entry(category="Technical", question="...", answer="...")
+ agent.add_kb_entry(category="Billing", question="...", answer="...")
+ ```
+
+ ### Memory Search & Export
+ Powerful memory management:
+
+ ```python
+ # Search across all memories
+ results = agent.search_memories("python", limit=5)
+
+ # Export everything
+ data = agent.export_user_data()
+
+ # Get insights
+ stats = agent.get_memory_stats()
+ ```
+
+ ## 📦 Project Structure
+
+ ### Core Components
+ - **MemAgent**: Main interface for building AI assistants
+ - **MemoryManager**: JSON-based memory storage (simple)
+ - **SQLMemoryManager**: SQLite-based storage (advanced)
+ - **OllamaClient**: LLM communication handler
+ - **KnowledgeLoader**: Knowledge base management
+
+ ### Optional Features
+ - **MemoryTools**: Search, export, statistics
+ - **ConfigManager**: YAML configuration
+ - **CLI**: Command-line interface
+
+ ## 🧪 Testing
+
+ Run the comprehensive test suite:
+
+ ```bash
+ # Install dev dependencies
+ pip install -r requirements-dev.txt
+
+ # Run all tests (34+ automated tests)
+ cd tests
+ python run_all_tests.py
+
+ # Run specific test
+ python -m pytest test_mem_agent.py -v
+ ```
+
+ ### Test Coverage
+ - ✅ Core imports and dependencies
+ - ✅ CLI functionality
+ - ✅ Ollama connection and models
+ - ✅ JSON memory operations
+ - ✅ SQL memory operations
+ - ✅ MemAgent features
+ - ✅ Configuration management
+ - ✅ Multi-user scenarios
+ - ✅ Hallucination detection
+
+ ## 📝 Examples
+
+ The `examples/` directory contains ready-to-run demonstrations:
+
+ 1. **01_hello_world.py** - Simplest possible example (5 lines)
+ 2. **02_basic_memory.py** - Memory persistence basics
+ 3. **03_multi_user.py** - Multiple users with separate memories
+ 4. **04_customer_service.py** - Real-world customer service scenario
+ 5. **05_knowledge_base.py** - FAQ/support system
+ 6. **06_cli_demo.py** - Command-line interface examples
+ 7. **07_document_config.py** - Configuration from documents
+
+ ## 🛠️ Development
+
+ ### Setup Development Environment
+
+ ```bash
+ git clone https://github.com/emredeveloper/Mem-LLM.git
+ cd Mem-LLM
+ pip install -e .
+ pip install -r requirements-dev.txt
+ ```
+
+ ### Running Tests
+
+ ```bash
+ pytest tests/ -v --cov=mem_llm
+ ```
+
+ ### Building Package
+
+ ```bash
+ python -m build
+ twine upload dist/*
+ ```
+
+ ## 📋 Requirements
+
+ ### Core Dependencies
+ - Python 3.8+
+ - requests>=2.31.0
+ - pyyaml>=6.0.1
+ - click>=8.1.0
+
+ ### Optional Dependencies
+ - pytest>=7.4.0 (for testing)
+ - flask>=3.0.0 (for web interface)
+ - fastapi>=0.104.0 (for API server)
+
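The optional groups above correspond to the `Provides-Extra` declarations in the metadata at the top of this file, so they can be pulled in as pip extras:

```bash
# Extras exactly as declared in the package metadata
pip install "mem-llm[dev]"   # pytest, pytest-cov, black, flake8
pip install "mem-llm[web]"   # flask, flask-cors
pip install "mem-llm[api]"   # fastapi, uvicorn
```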
+ ## 🤝 Contributing
+
+ Contributions are welcome! Please feel free to submit a Pull Request.
+
+ 1. Fork the repository
+ 2. Create your feature branch (`git checkout -b feature/AmazingFeature`)
+ 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
+ 4. Push to the branch (`git push origin feature/AmazingFeature`)
+ 5. Open a Pull Request
+
+ ## 📄 License
+
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+ ## 👤 Author
+
+ **C. Emre Karataş**
+ - Email: karatasqemre@gmail.com
+ - GitHub: [@emredeveloper](https://github.com/emredeveloper)
+
+ ## 🙏 Acknowledgments
+
+ - Built with [Ollama](https://ollama.ai) for local LLM support
+ - Inspired by the need for privacy-focused AI assistants
+ - Thanks to all contributors and users
+
+ ## 📊 Project Status
+
+ - **Version**: 1.0.11
+ - **Status**: Beta (Production Ready)
+ - **Last Updated**: October 20, 2025
+
+ ## 🔗 Links
+
+ - **PyPI**: https://pypi.org/project/mem-llm/
+ - **GitHub**: https://github.com/emredeveloper/Mem-LLM
+ - **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
+ - **Documentation**: See examples/ directory
+
+ ## 📈 Roadmap
+
+ - [ ] Web UI dashboard
+ - [ ] REST API server
+ - [ ] Vector database integration
+ - [ ] Multi-language support
+ - [ ] Cloud backup options
+ - [ ] Advanced analytics
+
+ ---
+
+ **⭐ If you find this project useful, please give it a star on GitHub!**
mem_llm-{1.0.10 → 1.0.11}.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
- mem_llm/__init__.py,sha256=PSNNshT0pmAU3qTtuPiWv_PDyjokHthNwgCO4B7aReY,1052
+ mem_llm/__init__.py,sha256=yRFLIT1DzhY7xyBs0PqZ_mf0FlN6HHiMGMDjUsRvHbk,1052
  mem_llm/cli.py,sha256=DiqQyBZknN8pVagY5jXH85_LZ6odVGopfpa-7DILNNE,8666
  mem_llm/config.yaml.example,sha256=lgmfaU5pxnIm4zYxwgCcgLSohNx1Jw6oh3Qk0Xoe2DE,917
  mem_llm/config_from_docs.py,sha256=YFhq1SWyK63C-TNMS73ncNHg8sJ-XGOf2idWVCjxFco,4974
@@ -10,8 +10,8 @@ mem_llm/mem_agent.py,sha256=ln6G5J-o1_tCe0tU956u59euii7f7LQt-DM0uhd27rM,29927
  mem_llm/memory_db.py,sha256=UzkMOw_p7svg6d4ZgpBWdPKoILWrJ2hAQSPHvAG_f4M,13563
  mem_llm/memory_manager.py,sha256=CZI3A8pFboHQIgeiXB1h2gZK7mgfbVSU3IxuqE-zXtc,9978
  mem_llm/memory_tools.py,sha256=ARANFqu_bmL56SlV1RzTjfQsJj-Qe2QvqY0pF92hDxU,8678
- mem_llm-1.0.10.dist-info/METADATA,sha256=Ym81G7c3Ck9usIZQWkoWzoVygVY2otl_H56NdCjuqa4,27031
- mem_llm-1.0.10.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
- mem_llm-1.0.10.dist-info/entry_points.txt,sha256=z9bg6xgNroIobvCMtnSXeFPc-vI1nMen8gejHCdnl0U,45
- mem_llm-1.0.10.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
- mem_llm-1.0.10.dist-info/RECORD,,
+ mem_llm-1.0.11.dist-info/METADATA,sha256=4JtxWpsZWr7jyIqwtP31Aj_IdSgFdLg-bTRz6NQHk9Y,12281
+ mem_llm-1.0.11.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
+ mem_llm-1.0.11.dist-info/entry_points.txt,sha256=z9bg6xgNroIobvCMtnSXeFPc-vI1nMen8gejHCdnl0U,45
+ mem_llm-1.0.11.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
+ mem_llm-1.0.11.dist-info/RECORD,,
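For readers comparing the hashes above: each RECORD row has the form `path,sha256=<digest>,size`, where the digest is the URL-safe base64 encoding of the file's SHA-256 with the trailing `=` padding stripped, per the wheel specification. A minimal sketch for recomputing one entry (the helper name is invented here):

```python
# Sketch: recompute a wheel RECORD digest for a single file.
import base64
import hashlib
import pathlib

def record_digest(path: str) -> str:
    """Return the sha256=<urlsafe-b64, no padding> form used in RECORD."""
    data = pathlib.Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    return "sha256=" + digest.rstrip(b"=").decode("ascii")

# Run inside the unpacked 1.0.11 wheel, record_digest("mem_llm/__init__.py")
# should match the RECORD entry above:
# "sha256=yRFLIT1DzhY7xyBs0PqZ_mf0FlN6HHiMGMDjUsRvHbk"
```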